Let's Talk About Unspeakable Things
@@ -89,6 +90,16 @@
transcribing
+
+
+
+
+
diff --git a/project/audio/description-alex1-A.srt b/project/audio/description-alex1-A.srt
new file mode 100644
index 0000000..0f0434a
--- /dev/null
+++ b/project/audio/description-alex1-A.srt
@@ -0,0 +1,70 @@
+1
+00:00:11,740 --> 00:00:18,060
+Em... I hear... It sounds like walking and it's quite windy outside
+
+2
+00:00:18,620 --> 00:00:25,460
+But it sounds like... It's... The microphone is inside or something
+
+3
+00:00:26,980 --> 00:00:29,340
+It's probably music
+
+4
+00:00:30,320 --> 00:00:35,700
+From a car maybe or from some radio?
+
+5
+00:00:37,600 --> 00:00:42,400
+It's outside on the street. I hear some cars in the background
+
+6
+00:00:48,700 --> 00:00:53,600
+Mmmm... It sounds like gardening sounds
+
+7
+00:00:54,820 --> 00:00:59,040
+There are people in the background or maybe it's bikes
+
+8
+00:01:05,840 --> 00:01:08,500
+Oh I hear a bird in the background
+
+9
+00:01:12,120 --> 00:01:16,440
+A very monotone sound
+
+10
+00:01:16,740 --> 00:01:21,180
+It's like driving, maybe driving in a car
+
+11
+00:01:35,120 --> 00:01:36,580
+And some wind around
+It seems like outside but there is some noise in the background
+
+12
+00:01:36,580 --> 00:01:36,600
+It seems like outside but there is some noise in the background
+
+13
+00:01:46,340 --> 00:01:48,460
+It's outside
+
+14
+00:02:04,920 --> 00:02:04,940
+Oh, I hear children in the background crying
+
+15
+00:02:04,940 --> 00:02:08,360
+There is some weird sound in the foreground, which I... It's hard to describe [chuckling]
+Oh, I hear children in the background crying
+
+16
+00:02:08,360 --> 00:02:08,440
+There is some weird sound in the foreground, which I... It's hard to describe [chuckling]
+
+17
+00:02:14,900 --> 00:02:19,580
+Again it sounds like there is a kindergarten or school nearby
+
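Note on the cue timing above: lines are deliberately repeated across adjacent cues (11–12 share one line; 14–16 cycle two), so that two descriptions sit on screen around the same moment — the effect the overlapping interface renders. A minimal sketch, not part of the project code, of parsing such an SRT and asking what is visible at a given time:

```python
# Minimal sketch; assumes well-formed SRT blocks separated by blank lines.
import re

TS = r"(\d+):(\d+):(\d+),(\d+)"

def to_seconds(h, m, s, ms):
    return int(h) * 3600 + int(m) * 60 + int(s) + int(ms) / 1000.0

def parse_srt(text):
    """Return (start, end, text) triples from an SRT string."""
    cues = []
    for block in re.split(r"\n\s*\n", text.strip()):
        lines = block.splitlines()
        g = re.match(TS + r" --> " + TS, lines[1]).groups()
        cues.append((to_seconds(*g[:4]), to_seconds(*g[4:]), " ".join(lines[2:])))
    return cues

def active_at(cues, t):
    """All description lines on screen at time t (in seconds)."""
    return [text for start, end, text in cues if start <= t < end]
```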
diff --git a/project/overlapping-interface.html b/project/overlapping-interface.html
index 8286b30..3408a6d 100644
--- a/project/overlapping-interface.html
+++ b/project/overlapping-interface.html
@@ -15,6 +15,27 @@
a {
text-decoration:none;
}
+#transcriptWrapper {
+  overflow: hidden;
+}
+#transcriptWrapper3 {
+  overflow: hidden;
+}
+#transcript3 > div {
+  transition: all .8s ease;
+  list-style-type: disc;
+}
+.speaking3 {
+  font-weight: bold;
+}
+
+#transcript > div {
+  transition: all .8s ease;
+  list-style-type: disc;
+}
+.speaking {
+  font-weight: bold;
+}
@@ -26,19 +47,17 @@
-
-
-
+
+
+
+
@@ -54,8 +73,9 @@ Experiment: I listen and record carefully sounds from the area that are coming f
@@ -194,7 +216,7 @@ a1.addEventListener("play", function(){
})
a1.addEventListener("pause", function(){
- but.innerHTML="play"
+ but.innerHTML="listen"
})
var links=document.querySelectorAll("div.sub a")
@@ -216,8 +238,47 @@ for (var i=0, l=links.length; i
+
+
+
+
+
+
+
diff --git a/project/venv/bin/activate b/project/venv/bin/activate
new file mode 100644
index 0000000..4651d62
--- /dev/null
+++ b/project/venv/bin/activate
@@ -0,0 +1,78 @@
+# This file must be used with "source bin/activate" *from bash*
+# you cannot run it directly
+
+deactivate () {
+ unset -f pydoc >/dev/null 2>&1
+
+ # reset old environment variables
+ # ! [ -z ${VAR+_} ] returns true if VAR is declared at all
+ if ! [ -z "${_OLD_VIRTUAL_PATH+_}" ] ; then
+ PATH="$_OLD_VIRTUAL_PATH"
+ export PATH
+ unset _OLD_VIRTUAL_PATH
+ fi
+ if ! [ -z "${_OLD_VIRTUAL_PYTHONHOME+_}" ] ; then
+ PYTHONHOME="$_OLD_VIRTUAL_PYTHONHOME"
+ export PYTHONHOME
+ unset _OLD_VIRTUAL_PYTHONHOME
+ fi
+
+ # This should detect bash and zsh, which have a hash command that must
+ # be called to get it to forget past commands. Without forgetting
+ # past commands the $PATH changes we made may not be respected
+ if [ -n "${BASH-}" ] || [ -n "${ZSH_VERSION-}" ] ; then
+ hash -r 2>/dev/null
+ fi
+
+ if ! [ -z "${_OLD_VIRTUAL_PS1+_}" ] ; then
+ PS1="$_OLD_VIRTUAL_PS1"
+ export PS1
+ unset _OLD_VIRTUAL_PS1
+ fi
+
+ unset VIRTUAL_ENV
+ if [ ! "${1-}" = "nondestructive" ] ; then
+ # Self destruct!
+ unset -f deactivate
+ fi
+}
+
+# unset irrelevant variables
+deactivate nondestructive
+
+VIRTUAL_ENV="/home/angeliki/Documents/0918_PZI/0918_Grad/xpub_graduation/project/venv"
+export VIRTUAL_ENV
+
+_OLD_VIRTUAL_PATH="$PATH"
+PATH="$VIRTUAL_ENV/bin:$PATH"
+export PATH
+
+# unset PYTHONHOME if set
+if ! [ -z "${PYTHONHOME+_}" ] ; then
+ _OLD_VIRTUAL_PYTHONHOME="$PYTHONHOME"
+ unset PYTHONHOME
+fi
+
+if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT-}" ] ; then
+ _OLD_VIRTUAL_PS1="${PS1-}"
+ if [ "x" != x ] ; then
+ PS1="${PS1-}"
+ else
+ PS1="(`basename \"$VIRTUAL_ENV\"`) ${PS1-}"
+ fi
+ export PS1
+fi
+
+# Make sure to unalias pydoc if it's already there
+alias pydoc 2>/dev/null >/dev/null && unalias pydoc || true
+
+pydoc () {
+ python -m pydoc "$@"
+}
+
+# This should detect bash and zsh, which have a hash command that must
+# be called to get it to forget past commands. Without forgetting
+# past commands the $PATH changes we made may not be respected
+if [ -n "${BASH-}" ] || [ -n "${ZSH_VERSION-}" ] ; then
+ hash -r 2>/dev/null
+fi
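For readers who don't speak shell: the script saves the old environment, prepends the venv's bin directory to PATH, and restores everything on `deactivate`. The same dance, sketched in Python purely for illustration (the `project/venv` path mirrors the VIRTUAL_ENV above):

```python
# Illustrative sketch of what activate/deactivate do to PATH; not project code.
import os

venv_bin = os.path.join("project", "venv", "bin")
_old_path = os.environ.get("PATH", "")                   # _OLD_VIRTUAL_PATH
os.environ["PATH"] = venv_bin + os.pathsep + _old_path   # activate: venv first
# ... the venv's python/pip now shadow the system ones ...
os.environ["PATH"] = _old_path                           # deactivate restores it
```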
diff --git a/project/venv/bin/activate.csh b/project/venv/bin/activate.csh
new file mode 100644
index 0000000..f88e00e
--- /dev/null
+++ b/project/venv/bin/activate.csh
@@ -0,0 +1,42 @@
+# This file must be used with "source bin/activate.csh" *from csh*.
+# You cannot run it directly.
+# Created by Davide Di Blasi <davidedb@gmail.com>.
+
+set newline='\
+'
+
+alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH:q" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT:q" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; test "\!:*" != "nondestructive" && unalias deactivate && unalias pydoc'
+
+# Unset irrelevant variables.
+deactivate nondestructive
+
+setenv VIRTUAL_ENV "/home/angeliki/Documents/0918_PZI/0918_Grad/xpub_graduation/project/venv"
+
+set _OLD_VIRTUAL_PATH="$PATH:q"
+setenv PATH "$VIRTUAL_ENV:q/bin:$PATH:q"
+
+
+
+if ("" != "") then
+ set env_name = ""
+else
+ set env_name = "$VIRTUAL_ENV:t:q"
+endif
+
+# Could be in a non-interactive environment,
+# in which case, $prompt is undefined and we wouldn't
+# care about the prompt anyway.
+if ( $?prompt ) then
+ set _OLD_VIRTUAL_PROMPT="$prompt:q"
+if ( "$prompt:q" =~ *"$newline:q"* ) then
+ :
+else
+ set prompt = "[$env_name:q] $prompt:q"
+endif
+endif
+
+unset env_name
+
+alias pydoc python -m pydoc
+
+rehash
diff --git a/project/venv/bin/activate.fish b/project/venv/bin/activate.fish
new file mode 100644
index 0000000..35e7e19
--- /dev/null
+++ b/project/venv/bin/activate.fish
@@ -0,0 +1,101 @@
+# This file must be used using `source bin/activate.fish` *within a running fish ( http://fishshell.com ) session*.
+# Do not run it directly.
+
+function _bashify_path -d "Converts a fish path to something bash can recognize"
+ set fishy_path $argv
+ set bashy_path $fishy_path[1]
+ for path_part in $fishy_path[2..-1]
+ set bashy_path "$bashy_path:$path_part"
+ end
+ echo $bashy_path
+end
+
+function _fishify_path -d "Converts a bash path to something fish can recognize"
+ echo $argv | tr ':' '\n'
+end
+
+function deactivate -d 'Exit virtualenv mode and return to the normal environment.'
+ # reset old environment variables
+ if test -n "$_OLD_VIRTUAL_PATH"
+ # https://github.com/fish-shell/fish-shell/issues/436 altered PATH handling
+ if test (echo $FISH_VERSION | tr "." "\n")[1] -lt 3
+ set -gx PATH (_fishify_path $_OLD_VIRTUAL_PATH)
+ else
+ set -gx PATH $_OLD_VIRTUAL_PATH
+ end
+ set -e _OLD_VIRTUAL_PATH
+ end
+
+ if test -n "$_OLD_VIRTUAL_PYTHONHOME"
+ set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME
+ set -e _OLD_VIRTUAL_PYTHONHOME
+ end
+
+ if test -n "$_OLD_FISH_PROMPT_OVERRIDE"
+ # Set an empty local `$fish_function_path` to allow the removal of `fish_prompt` using `functions -e`.
+ set -l fish_function_path
+
+ # Erase virtualenv's `fish_prompt` and restore the original.
+ functions -e fish_prompt
+ functions -c _old_fish_prompt fish_prompt
+ functions -e _old_fish_prompt
+ set -e _OLD_FISH_PROMPT_OVERRIDE
+ end
+
+ set -e VIRTUAL_ENV
+
+ if test "$argv[1]" != 'nondestructive'
+ # Self-destruct!
+ functions -e pydoc
+ functions -e deactivate
+ functions -e _bashify_path
+ functions -e _fishify_path
+ end
+end
+
+# Unset irrelevant variables.
+deactivate nondestructive
+
+set -gx VIRTUAL_ENV "/home/angeliki/Documents/0918_PZI/0918_Grad/xpub_graduation/project/venv"
+
+# https://github.com/fish-shell/fish-shell/issues/436 altered PATH handling
+if test (echo $FISH_VERSION | tr "." "\n")[1] -lt 3
+ set -gx _OLD_VIRTUAL_PATH (_bashify_path $PATH)
+else
+ set -gx _OLD_VIRTUAL_PATH $PATH
+end
+set -gx PATH "$VIRTUAL_ENV/bin" $PATH
+
+# Unset `$PYTHONHOME` if set.
+if set -q PYTHONHOME
+ set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME
+ set -e PYTHONHOME
+end
+
+function pydoc
+ python -m pydoc $argv
+end
+
+if test -z "$VIRTUAL_ENV_DISABLE_PROMPT"
+ # Copy the current `fish_prompt` function as `_old_fish_prompt`.
+ functions -c fish_prompt _old_fish_prompt
+
+ function fish_prompt
+ # Save the current $status, for fish_prompts that display it.
+ set -l old_status $status
+
+ # Prompt override provided?
+ # If not, just prepend the environment name.
+ if test -n ""
+ printf '%s%s' "" (set_color normal)
+ else
+ printf '%s(%s) ' (set_color normal) (basename "$VIRTUAL_ENV")
+ end
+
+ # Restore the original $status
+ echo "exit $old_status" | source
+ _old_fish_prompt
+ end
+
+ set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV"
+end
diff --git a/project/venv/bin/activate.ps1 b/project/venv/bin/activate.ps1
new file mode 100644
index 0000000..6d8ae2a
--- /dev/null
+++ b/project/venv/bin/activate.ps1
@@ -0,0 +1,60 @@
+# This file must be dot sourced from PoSh; you cannot run it directly. Do this: . ./activate.ps1
+
+$script:THIS_PATH = $myinvocation.mycommand.path
+$script:BASE_DIR = split-path (resolve-path "$THIS_PATH/..") -Parent
+
+function global:deactivate([switch] $NonDestructive)
+{
+ if (test-path variable:_OLD_VIRTUAL_PATH)
+ {
+ $env:PATH = $variable:_OLD_VIRTUAL_PATH
+ remove-variable "_OLD_VIRTUAL_PATH" -scope global
+ }
+
+ if (test-path function:_old_virtual_prompt)
+ {
+ $function:prompt = $function:_old_virtual_prompt
+ remove-item function:\_old_virtual_prompt
+ }
+
+ if ($env:VIRTUAL_ENV)
+ {
+ $old_env = split-path $env:VIRTUAL_ENV -leaf
+ remove-item env:VIRTUAL_ENV -erroraction silentlycontinue
+ }
+
+ if (!$NonDestructive)
+ {
+ # Self destruct!
+ remove-item function:deactivate
+ remove-item function:pydoc
+ }
+}
+
+function global:pydoc
+{
+ python -m pydoc $args
+}
+
+# unset irrelevant variables
+deactivate -nondestructive
+
+$VIRTUAL_ENV = $BASE_DIR
+$env:VIRTUAL_ENV = $VIRTUAL_ENV
+
+$global:_OLD_VIRTUAL_PATH = $env:PATH
+$env:PATH = "$env:VIRTUAL_ENV/bin:" + $env:PATH
+if (!$env:VIRTUAL_ENV_DISABLE_PROMPT)
+{
+ function global:_old_virtual_prompt
+ {
+ ""
+ }
+ $function:_old_virtual_prompt = $function:prompt
+ function global:prompt
+ {
+ # Add a prefix to the current prompt, but don't discard it.
+ write-host "($( split-path $env:VIRTUAL_ENV -leaf )) " -nonewline
+ & $function:_old_virtual_prompt
+ }
+}
diff --git a/project/venv/bin/activate_this.py b/project/venv/bin/activate_this.py
new file mode 100644
index 0000000..59b5d72
--- /dev/null
+++ b/project/venv/bin/activate_this.py
@@ -0,0 +1,46 @@
+"""Activate virtualenv for current interpreter:
+
+Use exec(open(this_file).read(), {'__file__': this_file}).
+
+This can be used when you must use an existing Python interpreter, not the virtualenv bin/python.
+"""
+import os
+import site
+import sys
+
+try:
+ __file__
+except NameError:
+ raise AssertionError("You must use exec(open(this_file).read(), {'__file__': this_file}))")
+
+# prepend bin to PATH (this file is inside the bin directory)
+bin_dir = os.path.dirname(os.path.abspath(__file__))
+os.environ["PATH"] = os.pathsep.join([bin_dir] + os.environ.get("PATH", "").split(os.pathsep))
+
+base = os.path.dirname(bin_dir)
+
+# virtual env is right above bin directory
+os.environ["VIRTUAL_ENV"] = base
+
+# add the virtual environments site-package to the host python import mechanism
+IS_PYPY = hasattr(sys, "pypy_version_info")
+IS_JYTHON = sys.platform.startswith("java")
+if IS_JYTHON:
+ site_packages = os.path.join(base, "Lib", "site-packages")
+elif IS_PYPY:
+ site_packages = os.path.join(base, "site-packages")
+else:
+ IS_WIN = sys.platform == "win32"
+ if IS_WIN:
+ site_packages = os.path.join(base, "Lib", "site-packages")
+ else:
+ site_packages = os.path.join(base, "lib", "python{}".format(sys.version[:3]), "site-packages")
+
+prev = set(sys.path)
+site.addsitedir(site_packages)
+sys.real_prefix = sys.prefix
+sys.prefix = base
+
+# Move the added items to the front of the path, in place
+new = list(sys.path)
+sys.path[:] = [i for i in new if i not in prev] + [i for i in new if i in prev]
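Usage, per the module's own docstring (the path below assumes this repo's layout and is illustrative):

```python
# Activate project/venv inside an already-running interpreter, e.g. a system
# Python that must import packages installed in the venv.
activate_this = "project/venv/bin/activate_this.py"   # assumed repo-relative path
exec(open(activate_this).read(), {"__file__": activate_this})
```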
diff --git a/project/venv/bin/easy_install b/project/venv/bin/easy_install
new file mode 100755
index 0000000..e70a64d
--- /dev/null
+++ b/project/venv/bin/easy_install
@@ -0,0 +1,10 @@
+#!/home/angeliki/Documents/0918_PZI/0918_Grad/xpub_graduation/project/venv/bin/python
+# -*- coding: utf-8 -*-
+import re
+import sys
+
+from setuptools.command.easy_install import main
+
+if __name__ == '__main__':
+ sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
+ sys.exit(main())
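The `re.sub` line is the standard setuptools console-script shim: it strips a Windows `-script.py`/`.exe` suffix from `argv[0]` so usage text shows the bare command name. A quick check of that regex:

```python
import re
# The same pattern the wrapper scripts use; all three print "easy_install".
for argv0 in ("easy_install", "easy_install-script.py", "easy_install.exe"):
    print(re.sub(r'(-script\.pyw?|\.exe)?$', '', argv0))
```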
diff --git a/project/venv/bin/easy_install-2.7 b/project/venv/bin/easy_install-2.7
new file mode 100755
index 0000000..e70a64d
--- /dev/null
+++ b/project/venv/bin/easy_install-2.7
@@ -0,0 +1,10 @@
+#!/home/angeliki/Documents/0918_PZI/0918_Grad/xpub_graduation/project/venv/bin/python
+# -*- coding: utf-8 -*-
+import re
+import sys
+
+from setuptools.command.easy_install import main
+
+if __name__ == '__main__':
+ sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
+ sys.exit(main())
diff --git a/project/venv/bin/f2py b/project/venv/bin/f2py
new file mode 100755
index 0000000..a33f775
--- /dev/null
+++ b/project/venv/bin/f2py
@@ -0,0 +1,10 @@
+#!/home/angeliki/Documents/0918_PZI/0918_Grad/xpub_graduation/project/venv/bin/python
+# -*- coding: utf-8 -*-
+import re
+import sys
+
+from numpy.f2py.f2py2e import main
+
+if __name__ == '__main__':
+ sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
+ sys.exit(main())
diff --git a/project/venv/bin/f2py2 b/project/venv/bin/f2py2
new file mode 100755
index 0000000..a33f775
--- /dev/null
+++ b/project/venv/bin/f2py2
@@ -0,0 +1,10 @@
+#!/home/angeliki/Documents/0918_PZI/0918_Grad/xpub_graduation/project/venv/bin/python
+# -*- coding: utf-8 -*-
+import re
+import sys
+
+from numpy.f2py.f2py2e import main
+
+if __name__ == '__main__':
+ sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
+ sys.exit(main())
diff --git a/project/venv/bin/f2py2.7 b/project/venv/bin/f2py2.7
new file mode 100755
index 0000000..a33f775
--- /dev/null
+++ b/project/venv/bin/f2py2.7
@@ -0,0 +1,10 @@
+#!/home/angeliki/Documents/0918_PZI/0918_Grad/xpub_graduation/project/venv/bin/python
+# -*- coding: utf-8 -*-
+import re
+import sys
+
+from numpy.f2py.f2py2e import main
+
+if __name__ == '__main__':
+ sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
+ sys.exit(main())
diff --git a/project/venv/bin/pip b/project/venv/bin/pip
new file mode 100755
index 0000000..881f992
--- /dev/null
+++ b/project/venv/bin/pip
@@ -0,0 +1,10 @@
+#!/home/angeliki/Documents/0918_PZI/0918_Grad/xpub_graduation/project/venv/bin/python
+# -*- coding: utf-8 -*-
+import re
+import sys
+
+from pip._internal import main
+
+if __name__ == '__main__':
+ sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
+ sys.exit(main())
diff --git a/project/venv/bin/pip2 b/project/venv/bin/pip2
new file mode 100755
index 0000000..881f992
--- /dev/null
+++ b/project/venv/bin/pip2
@@ -0,0 +1,10 @@
+#!/home/angeliki/Documents/0918_PZI/0918_Grad/xpub_graduation/project/venv/bin/python
+# -*- coding: utf-8 -*-
+import re
+import sys
+
+from pip._internal import main
+
+if __name__ == '__main__':
+ sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
+ sys.exit(main())
diff --git a/project/venv/bin/pip2.7 b/project/venv/bin/pip2.7
new file mode 100755
index 0000000..881f992
--- /dev/null
+++ b/project/venv/bin/pip2.7
@@ -0,0 +1,10 @@
+#!/home/angeliki/Documents/0918_PZI/0918_Grad/xpub_graduation/project/venv/bin/python
+# -*- coding: utf-8 -*-
+import re
+import sys
+
+from pip._internal import main
+
+if __name__ == '__main__':
+ sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
+ sys.exit(main())
diff --git a/project/venv/bin/python b/project/venv/bin/python
new file mode 100755
index 0000000..39388a1
Binary files /dev/null and b/project/venv/bin/python differ
diff --git a/project/venv/bin/python-config b/project/venv/bin/python-config
new file mode 100755
index 0000000..ee561ac
--- /dev/null
+++ b/project/venv/bin/python-config
@@ -0,0 +1,78 @@
+#!/home/angeliki/Documents/0918_PZI/0918_Grad/xpub_graduation/project/venv/bin/python
+
+import sys
+import getopt
+import sysconfig
+
+valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
+ 'ldflags', 'help']
+
+if sys.version_info >= (3, 2):
+ valid_opts.insert(-1, 'extension-suffix')
+ valid_opts.append('abiflags')
+if sys.version_info >= (3, 3):
+ valid_opts.append('configdir')
+
+
+def exit_with_usage(code=1):
+ sys.stderr.write("Usage: {0} [{1}]\n".format(
+ sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
+ sys.exit(code)
+
+try:
+ opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
+except getopt.error:
+ exit_with_usage()
+
+if not opts:
+ exit_with_usage()
+
+pyver = sysconfig.get_config_var('VERSION')
+getvar = sysconfig.get_config_var
+
+opt_flags = [flag for (flag, val) in opts]
+
+if '--help' in opt_flags:
+ exit_with_usage(code=0)
+
+for opt in opt_flags:
+ if opt == '--prefix':
+ print(sysconfig.get_config_var('prefix'))
+
+ elif opt == '--exec-prefix':
+ print(sysconfig.get_config_var('exec_prefix'))
+
+ elif opt in ('--includes', '--cflags'):
+ flags = ['-I' + sysconfig.get_path('include'),
+ '-I' + sysconfig.get_path('platinclude')]
+ if opt == '--cflags':
+ flags.extend(getvar('CFLAGS').split())
+ print(' '.join(flags))
+
+ elif opt in ('--libs', '--ldflags'):
+ abiflags = getattr(sys, 'abiflags', '')
+ libs = ['-lpython' + pyver + abiflags]
+ libs += getvar('LIBS').split()
+ libs += getvar('SYSLIBS').split()
+ # add the prefix/lib/pythonX.Y/config dir, but only if there is no
+ # shared library in prefix/lib/.
+ if opt == '--ldflags':
+ if not getvar('Py_ENABLE_SHARED'):
+ libs.insert(0, '-L' + getvar('LIBPL'))
+ if not getvar('PYTHONFRAMEWORK'):
+ libs.extend(getvar('LINKFORSHARED').split())
+ print(' '.join(libs))
+
+ elif opt == '--extension-suffix':
+ ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
+ if ext_suffix is None:
+ ext_suffix = sysconfig.get_config_var('SO')
+ print(ext_suffix)
+
+ elif opt == '--abiflags':
+ if not getattr(sys, 'abiflags', None):
+ exit_with_usage()
+ print(sys.abiflags)
+
+ elif opt == '--configdir':
+ print(sysconfig.get_config_var('LIBPL'))
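Everything this script prints comes from `sysconfig`, so the same values can be queried directly from Python; a hedged equivalent of two of the flags:

```python
import sysconfig
print(sysconfig.get_path("include"))        # roughly what --includes prints (first -I)
print(sysconfig.get_config_var("LIBPL"))    # what --configdir prints; may be None off POSIX
```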
diff --git a/project/venv/bin/python2 b/project/venv/bin/python2
new file mode 120000
index 0000000..d8654aa
--- /dev/null
+++ b/project/venv/bin/python2
@@ -0,0 +1 @@
+python
\ No newline at end of file
diff --git a/project/venv/bin/python2.7 b/project/venv/bin/python2.7
new file mode 120000
index 0000000..d8654aa
--- /dev/null
+++ b/project/venv/bin/python2.7
@@ -0,0 +1 @@
+python
\ No newline at end of file
diff --git a/project/venv/bin/wheel b/project/venv/bin/wheel
new file mode 100755
index 0000000..951b3ca
--- /dev/null
+++ b/project/venv/bin/wheel
@@ -0,0 +1,10 @@
+#!/home/angeliki/Documents/0918_PZI/0918_Grad/xpub_graduation/project/venv/bin/python
+# -*- coding: utf-8 -*-
+import re
+import sys
+
+from wheel.cli import main
+
+if __name__ == '__main__':
+ sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
+ sys.exit(main())
diff --git a/project/venv/include/python2.7 b/project/venv/include/python2.7
new file mode 120000
index 0000000..ad4ca80
--- /dev/null
+++ b/project/venv/include/python2.7
@@ -0,0 +1 @@
+/usr/include/python2.7
\ No newline at end of file
diff --git a/project/venv/lib/python2.7/UserDict.py b/project/venv/lib/python2.7/UserDict.py
new file mode 120000
index 0000000..1dcde33
--- /dev/null
+++ b/project/venv/lib/python2.7/UserDict.py
@@ -0,0 +1 @@
+/usr/lib/python2.7/UserDict.py
\ No newline at end of file
diff --git a/project/venv/lib/python2.7/UserDict.pyc b/project/venv/lib/python2.7/UserDict.pyc
new file mode 100644
index 0000000..f9d01c1
Binary files /dev/null and b/project/venv/lib/python2.7/UserDict.pyc differ
diff --git a/project/venv/lib/python2.7/_abcoll.py b/project/venv/lib/python2.7/_abcoll.py
new file mode 120000
index 0000000..e39c38d
--- /dev/null
+++ b/project/venv/lib/python2.7/_abcoll.py
@@ -0,0 +1 @@
+/usr/lib/python2.7/_abcoll.py
\ No newline at end of file
diff --git a/project/venv/lib/python2.7/_abcoll.pyc b/project/venv/lib/python2.7/_abcoll.pyc
new file mode 100644
index 0000000..a1cafa0
Binary files /dev/null and b/project/venv/lib/python2.7/_abcoll.pyc differ
diff --git a/project/venv/lib/python2.7/_weakrefset.py b/project/venv/lib/python2.7/_weakrefset.py
new file mode 120000
index 0000000..a3c1cd4
--- /dev/null
+++ b/project/venv/lib/python2.7/_weakrefset.py
@@ -0,0 +1 @@
+/usr/lib/python2.7/_weakrefset.py
\ No newline at end of file
diff --git a/project/venv/lib/python2.7/_weakrefset.pyc b/project/venv/lib/python2.7/_weakrefset.pyc
new file mode 100644
index 0000000..0730d67
Binary files /dev/null and b/project/venv/lib/python2.7/_weakrefset.pyc differ
diff --git a/project/venv/lib/python2.7/abc.py b/project/venv/lib/python2.7/abc.py
new file mode 120000
index 0000000..cb3e5d1
--- /dev/null
+++ b/project/venv/lib/python2.7/abc.py
@@ -0,0 +1 @@
+/usr/lib/python2.7/abc.py
\ No newline at end of file
diff --git a/project/venv/lib/python2.7/abc.pyc b/project/venv/lib/python2.7/abc.pyc
new file mode 100644
index 0000000..a974b9e
Binary files /dev/null and b/project/venv/lib/python2.7/abc.pyc differ
diff --git a/project/venv/lib/python2.7/codecs.py b/project/venv/lib/python2.7/codecs.py
new file mode 120000
index 0000000..50169dc
--- /dev/null
+++ b/project/venv/lib/python2.7/codecs.py
@@ -0,0 +1 @@
+/usr/lib/python2.7/codecs.py
\ No newline at end of file
diff --git a/project/venv/lib/python2.7/codecs.pyc b/project/venv/lib/python2.7/codecs.pyc
new file mode 100644
index 0000000..900ca90
Binary files /dev/null and b/project/venv/lib/python2.7/codecs.pyc differ
diff --git a/project/venv/lib/python2.7/copy_reg.py b/project/venv/lib/python2.7/copy_reg.py
new file mode 120000
index 0000000..5dc0af3
--- /dev/null
+++ b/project/venv/lib/python2.7/copy_reg.py
@@ -0,0 +1 @@
+/usr/lib/python2.7/copy_reg.py
\ No newline at end of file
diff --git a/project/venv/lib/python2.7/copy_reg.pyc b/project/venv/lib/python2.7/copy_reg.pyc
new file mode 100644
index 0000000..4869f65
Binary files /dev/null and b/project/venv/lib/python2.7/copy_reg.pyc differ
diff --git a/project/venv/lib/python2.7/distutils/__init__.py b/project/venv/lib/python2.7/distutils/__init__.py
new file mode 100644
index 0000000..b9b0f24
--- /dev/null
+++ b/project/venv/lib/python2.7/distutils/__init__.py
@@ -0,0 +1,134 @@
+import os
+import sys
+import warnings
+
+# opcode is not a virtualenv module, so we can use it to find the stdlib
+# Important! To work on pypy, this must be a module that resides in the
+# lib-python/modified-x.y.z directory
+import opcode
+
+dirname = os.path.dirname
+
+distutils_path = os.path.join(os.path.dirname(opcode.__file__), "distutils")
+if os.path.normpath(distutils_path) == os.path.dirname(os.path.normpath(__file__)):
+ warnings.warn("The virtualenv distutils package at %s appears to be in the same location as the system distutils?")
+else:
+ __path__.insert(0, distutils_path) # noqa: F821
+ if sys.version_info < (3, 4):
+ import imp
+
+ real_distutils = imp.load_module("_virtualenv_distutils", None, distutils_path, ("", "", imp.PKG_DIRECTORY))
+ else:
+ import importlib.machinery
+
+ distutils_path = os.path.join(distutils_path, "__init__.py")
+ loader = importlib.machinery.SourceFileLoader("_virtualenv_distutils", distutils_path)
+ if sys.version_info < (3, 5):
+ import types
+
+ real_distutils = types.ModuleType(loader.name)
+ else:
+ import importlib.util
+
+ spec = importlib.util.spec_from_loader(loader.name, loader)
+ real_distutils = importlib.util.module_from_spec(spec)
+ loader.exec_module(real_distutils)
+
+ # Copy the relevant attributes
+ try:
+ __revision__ = real_distutils.__revision__
+ except AttributeError:
+ pass
+ __version__ = real_distutils.__version__
+
+from distutils import dist, sysconfig # isort:skip
+
+try:
+ basestring
+except NameError:
+ basestring = str
+
+# patch build_ext (distutils doesn't know how to get the libs directory
+# path on windows - it hardcodes the paths around the patched sys.prefix)
+
+if sys.platform == "win32":
+ from distutils.command.build_ext import build_ext as old_build_ext
+
+ class build_ext(old_build_ext):
+ def finalize_options(self):
+ if self.library_dirs is None:
+ self.library_dirs = []
+ elif isinstance(self.library_dirs, basestring):
+ self.library_dirs = self.library_dirs.split(os.pathsep)
+
+ self.library_dirs.insert(0, os.path.join(sys.real_prefix, "Libs"))
+ old_build_ext.finalize_options(self)
+
+ from distutils.command import build_ext as build_ext_module
+
+ build_ext_module.build_ext = build_ext
+
+# distutils.dist patches:
+
+old_find_config_files = dist.Distribution.find_config_files
+
+
+def find_config_files(self):
+ found = old_find_config_files(self)
+ if os.name == "posix":
+ user_filename = ".pydistutils.cfg"
+ else:
+ user_filename = "pydistutils.cfg"
+ user_filename = os.path.join(sys.prefix, user_filename)
+ if os.path.isfile(user_filename):
+ for item in list(found):
+ if item.endswith("pydistutils.cfg"):
+ found.remove(item)
+ found.append(user_filename)
+ return found
+
+
+dist.Distribution.find_config_files = find_config_files
+
+# distutils.sysconfig patches:
+
+old_get_python_inc = sysconfig.get_python_inc
+
+
+def sysconfig_get_python_inc(plat_specific=0, prefix=None):
+ if prefix is None:
+ prefix = sys.real_prefix
+ return old_get_python_inc(plat_specific, prefix)
+
+
+sysconfig_get_python_inc.__doc__ = old_get_python_inc.__doc__
+sysconfig.get_python_inc = sysconfig_get_python_inc
+
+old_get_python_lib = sysconfig.get_python_lib
+
+
+def sysconfig_get_python_lib(plat_specific=0, standard_lib=0, prefix=None):
+ if standard_lib and prefix is None:
+ prefix = sys.real_prefix
+ return old_get_python_lib(plat_specific, standard_lib, prefix)
+
+
+sysconfig_get_python_lib.__doc__ = old_get_python_lib.__doc__
+sysconfig.get_python_lib = sysconfig_get_python_lib
+
+old_get_config_vars = sysconfig.get_config_vars
+
+
+def sysconfig_get_config_vars(*args):
+ real_vars = old_get_config_vars(*args)
+ if sys.platform == "win32":
+ lib_dir = os.path.join(sys.real_prefix, "libs")
+ if isinstance(real_vars, dict) and "LIBDIR" not in real_vars:
+ real_vars["LIBDIR"] = lib_dir # asked for all
+ elif isinstance(real_vars, list) and "LIBDIR" in args:
+ real_vars = real_vars + [lib_dir] # asked for list
+ return real_vars
+
+
+sysconfig_get_config_vars.__doc__ = old_get_config_vars.__doc__
+sysconfig.get_config_vars = sysconfig_get_config_vars
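Net effect of these patches, sketched: inside the venv, `sys.prefix` points at the venv while `sys.real_prefix` keeps the system prefix that the patched `sysconfig` getters fall back to. Illustrative only:

```python
import sys
# Inside this virtualenv-style venv, both prefixes are visible:
print(sys.prefix)                          # .../project/venv
print(getattr(sys, "real_prefix", None))   # /usr, matching orig-prefix.txt below
```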
diff --git a/project/venv/lib/python2.7/distutils/__init__.pyc b/project/venv/lib/python2.7/distutils/__init__.pyc
new file mode 100644
index 0000000..bfb8eaa
Binary files /dev/null and b/project/venv/lib/python2.7/distutils/__init__.pyc differ
diff --git a/project/venv/lib/python2.7/distutils/distutils.cfg b/project/venv/lib/python2.7/distutils/distutils.cfg
new file mode 100644
index 0000000..1af230e
--- /dev/null
+++ b/project/venv/lib/python2.7/distutils/distutils.cfg
@@ -0,0 +1,6 @@
+# This is a config file local to this virtualenv installation
+# You may include options that will be used by all distutils commands,
+# and by easy_install. For instance:
+#
+# [easy_install]
+# find_links = http://mylocalsite
diff --git a/project/venv/lib/python2.7/encodings b/project/venv/lib/python2.7/encodings
new file mode 120000
index 0000000..1250ad8
--- /dev/null
+++ b/project/venv/lib/python2.7/encodings
@@ -0,0 +1 @@
+/usr/lib/python2.7/encodings
\ No newline at end of file
diff --git a/project/venv/lib/python2.7/fnmatch.py b/project/venv/lib/python2.7/fnmatch.py
new file mode 120000
index 0000000..ec3e10c
--- /dev/null
+++ b/project/venv/lib/python2.7/fnmatch.py
@@ -0,0 +1 @@
+/usr/lib/python2.7/fnmatch.py
\ No newline at end of file
diff --git a/project/venv/lib/python2.7/fnmatch.pyc b/project/venv/lib/python2.7/fnmatch.pyc
new file mode 100644
index 0000000..37c070f
Binary files /dev/null and b/project/venv/lib/python2.7/fnmatch.pyc differ
diff --git a/project/venv/lib/python2.7/genericpath.py b/project/venv/lib/python2.7/genericpath.py
new file mode 120000
index 0000000..cb8897c
--- /dev/null
+++ b/project/venv/lib/python2.7/genericpath.py
@@ -0,0 +1 @@
+/usr/lib/python2.7/genericpath.py
\ No newline at end of file
diff --git a/project/venv/lib/python2.7/genericpath.pyc b/project/venv/lib/python2.7/genericpath.pyc
new file mode 100644
index 0000000..929f820
Binary files /dev/null and b/project/venv/lib/python2.7/genericpath.pyc differ
diff --git a/project/venv/lib/python2.7/lib-dynload b/project/venv/lib/python2.7/lib-dynload
new file mode 120000
index 0000000..c706a1e
--- /dev/null
+++ b/project/venv/lib/python2.7/lib-dynload
@@ -0,0 +1 @@
+/usr/lib/python2.7/lib-dynload
\ No newline at end of file
diff --git a/project/venv/lib/python2.7/linecache.py b/project/venv/lib/python2.7/linecache.py
new file mode 120000
index 0000000..943c429
--- /dev/null
+++ b/project/venv/lib/python2.7/linecache.py
@@ -0,0 +1 @@
+/usr/lib/python2.7/linecache.py
\ No newline at end of file
diff --git a/project/venv/lib/python2.7/linecache.pyc b/project/venv/lib/python2.7/linecache.pyc
new file mode 100644
index 0000000..aad96e2
Binary files /dev/null and b/project/venv/lib/python2.7/linecache.pyc differ
diff --git a/project/venv/lib/python2.7/locale.py b/project/venv/lib/python2.7/locale.py
new file mode 120000
index 0000000..92c243c
--- /dev/null
+++ b/project/venv/lib/python2.7/locale.py
@@ -0,0 +1 @@
+/usr/lib/python2.7/locale.py
\ No newline at end of file
diff --git a/project/venv/lib/python2.7/locale.pyc b/project/venv/lib/python2.7/locale.pyc
new file mode 100644
index 0000000..bdfa23c
Binary files /dev/null and b/project/venv/lib/python2.7/locale.pyc differ
diff --git a/project/venv/lib/python2.7/no-global-site-packages.txt b/project/venv/lib/python2.7/no-global-site-packages.txt
new file mode 100644
index 0000000..e69de29
diff --git a/project/venv/lib/python2.7/ntpath.py b/project/venv/lib/python2.7/ntpath.py
new file mode 120000
index 0000000..5659ae1
--- /dev/null
+++ b/project/venv/lib/python2.7/ntpath.py
@@ -0,0 +1 @@
+/usr/lib/python2.7/ntpath.py
\ No newline at end of file
diff --git a/project/venv/lib/python2.7/ntpath.pyc b/project/venv/lib/python2.7/ntpath.pyc
new file mode 100644
index 0000000..799ce32
Binary files /dev/null and b/project/venv/lib/python2.7/ntpath.pyc differ
diff --git a/project/venv/lib/python2.7/orig-prefix.txt b/project/venv/lib/python2.7/orig-prefix.txt
new file mode 100644
index 0000000..e25db58
--- /dev/null
+++ b/project/venv/lib/python2.7/orig-prefix.txt
@@ -0,0 +1 @@
+/usr
\ No newline at end of file
diff --git a/project/venv/lib/python2.7/os.py b/project/venv/lib/python2.7/os.py
new file mode 120000
index 0000000..950fc8d
--- /dev/null
+++ b/project/venv/lib/python2.7/os.py
@@ -0,0 +1 @@
+/usr/lib/python2.7/os.py
\ No newline at end of file
diff --git a/project/venv/lib/python2.7/os.pyc b/project/venv/lib/python2.7/os.pyc
new file mode 100644
index 0000000..f84c6e5
Binary files /dev/null and b/project/venv/lib/python2.7/os.pyc differ
diff --git a/project/venv/lib/python2.7/posixpath.py b/project/venv/lib/python2.7/posixpath.py
new file mode 120000
index 0000000..30cb8ca
--- /dev/null
+++ b/project/venv/lib/python2.7/posixpath.py
@@ -0,0 +1 @@
+/usr/lib/python2.7/posixpath.py
\ No newline at end of file
diff --git a/project/venv/lib/python2.7/posixpath.pyc b/project/venv/lib/python2.7/posixpath.pyc
new file mode 100644
index 0000000..3bc4de5
Binary files /dev/null and b/project/venv/lib/python2.7/posixpath.pyc differ
diff --git a/project/venv/lib/python2.7/re.py b/project/venv/lib/python2.7/re.py
new file mode 120000
index 0000000..56a0731
--- /dev/null
+++ b/project/venv/lib/python2.7/re.py
@@ -0,0 +1 @@
+/usr/lib/python2.7/re.py
\ No newline at end of file
diff --git a/project/venv/lib/python2.7/re.pyc b/project/venv/lib/python2.7/re.pyc
new file mode 100644
index 0000000..53d7398
Binary files /dev/null and b/project/venv/lib/python2.7/re.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/PyYAML-5.1.dist-info/INSTALLER b/project/venv/lib/python2.7/site-packages/PyYAML-5.1.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/PyYAML-5.1.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/project/venv/lib/python2.7/site-packages/PyYAML-5.1.dist-info/LICENSE b/project/venv/lib/python2.7/site-packages/PyYAML-5.1.dist-info/LICENSE
new file mode 100644
index 0000000..e8f8805
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/PyYAML-5.1.dist-info/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2017-2019 Ingy döt Net
+Copyright (c) 2006-2016 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/project/venv/lib/python2.7/site-packages/PyYAML-5.1.dist-info/METADATA b/project/venv/lib/python2.7/site-packages/PyYAML-5.1.dist-info/METADATA
new file mode 100644
index 0000000..e4ea8f1
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/PyYAML-5.1.dist-info/METADATA
@@ -0,0 +1,40 @@
+Metadata-Version: 2.1
+Name: PyYAML
+Version: 5.1
+Summary: YAML parser and emitter for Python
+Home-page: https://github.com/yaml/pyyaml
+Author: Kirill Simonov
+Author-email: xi@resolvent.net
+License: MIT
+Download-URL: https://pypi.org/project/PyYAML/
+Platform: Any
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Text Processing :: Markup
+Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*
+
+YAML is a data serialization format designed for human readability
+and interaction with scripting languages. PyYAML is a YAML parser
+and emitter for Python.
+
+PyYAML features a complete YAML 1.1 parser, Unicode support, pickle
+support, capable extension API, and sensible error messages. PyYAML
+supports standard YAML tags and provides Python-specific tags that
+allow to represent an arbitrary Python object.
+
+PyYAML is applicable for a broad range of tasks from complex
+configuration files to object serialization and persistence.
+
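A minimal round-trip with this venv's PyYAML 5.1, using made-up illustrative data:

```python
import yaml  # PyYAML 5.1 from this venv
doc = yaml.safe_load("clip: description-alex1-A\nsounds: [wind, cars, birds]")
print(yaml.safe_dump(doc))
```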
diff --git a/project/venv/lib/python2.7/site-packages/PyYAML-5.1.dist-info/RECORD b/project/venv/lib/python2.7/site-packages/PyYAML-5.1.dist-info/RECORD
new file mode 100644
index 0000000..6d74479
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/PyYAML-5.1.dist-info/RECORD
@@ -0,0 +1,40 @@
+PyYAML-5.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+PyYAML-5.1.dist-info/LICENSE,sha256=oq25yVm3l0lKDvgL32DiLbJ0nuPgwJCFVuPrVI-WfFY,1101
+PyYAML-5.1.dist-info/METADATA,sha256=33vAx8vSzjUMXGHO2jp0u8tvgkRqfAH3-OEDSpjfIx8,1704
+PyYAML-5.1.dist-info/RECORD,,
+PyYAML-5.1.dist-info/WHEEL,sha256=HMBxudg1Bdignf3xswgnPWbkfRGI13bogQQwWC4xJ5w,105
+PyYAML-5.1.dist-info/top_level.txt,sha256=rpj0IVMTisAjh_1vG3Ccf9v5jpCQwAz6cD1IVU5ZdhQ,11
+yaml/__init__.py,sha256=Ke2gASae4i0HLUxbz8l2CUR3__qpspvBBI1gltOxQbE,12182
+yaml/__init__.pyc,,
+yaml/composer.py,sha256=TtvWJCaffdVHhHx_muLguloDUwg1NUbo-GlJ0me6xuA,4923
+yaml/composer.pyc,,
+yaml/constructor.py,sha256=HrBLwPOrijfcmurB4Ww71xrTtMKehs0-xh3NNlQzveY,26834
+yaml/constructor.pyc,,
+yaml/cyaml.py,sha256=6MZKSJHTWwoct5urK9qbGXAhM1xz_Boawlf5SVMC85o,3842
+yaml/cyaml.pyc,,
+yaml/dumper.py,sha256=yMFwlojOY-M4EMUT8FiROY2QC2GAGQO7Iiky0xBlJFc,2833
+yaml/dumper.pyc,,
+yaml/emitter.py,sha256=09BwSivdd4BbPgx2rbFcoMOa9H1yZxqY1YILjOXvFeo,43427
+yaml/emitter.pyc,,
+yaml/error.py,sha256=7K-NdIv0qNKPKbnXxEg0L_b9K7nYDORr3rzm8_b-iBY,2559
+yaml/error.pyc,,
+yaml/events.py,sha256=50_TksgQiE4up-lKo_V-nBy-tAIxkIPQxY5qDhKCeHw,2445
+yaml/events.pyc,,
+yaml/loader.py,sha256=T61DzDkbfResZoQfVy1U3vN0_mKFyBALB9et-478-w4,2055
+yaml/loader.pyc,,
+yaml/nodes.py,sha256=gPKNj8pKCdh2d4gr3gIYINnPOaOxGhJAUiYhGRnPE84,1440
+yaml/nodes.pyc,,
+yaml/parser.py,sha256=sgXahZA3DkySYnaC4D_zcl3l2y4Y5R40icWtdwkF_NE,25542
+yaml/parser.pyc,,
+yaml/reader.py,sha256=y6cj-OcZjnFDO1u6Blmi-tKoi-3Pjvmo2PikMnO5FFc,6850
+yaml/reader.pyc,,
+yaml/representer.py,sha256=BaJcXjs4a1yjYMnCKxJ39TZlWahVu2OE7K6dEEy0Bqo,17791
+yaml/representer.pyc,,
+yaml/resolver.py,sha256=5Z3boiMikL6Qt6fS5Mt8fHym0GxbW7CMT2f2fnD1ZPQ,9122
+yaml/resolver.pyc,,
+yaml/scanner.py,sha256=Umb0oXZGFIf814tr_CUPPfKfeWUZj9ucqz06q20v0Fg,52027
+yaml/scanner.pyc,,
+yaml/serializer.py,sha256=tRsRwfu5E9fpLU7LY3vBQf2prt77hwnYlMt5dnBJLig,4171
+yaml/serializer.pyc,,
+yaml/tokens.py,sha256=lTQIzSVw8Mg9wv459-TjiOQe6wVziqaRlqX2_89rp54,2573
+yaml/tokens.pyc,,
diff --git a/project/venv/lib/python2.7/site-packages/PyYAML-5.1.dist-info/WHEEL b/project/venv/lib/python2.7/site-packages/PyYAML-5.1.dist-info/WHEEL
new file mode 100644
index 0000000..e28e857
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/PyYAML-5.1.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.33.1)
+Root-Is-Purelib: false
+Tag: cp27-cp27mu-linux_x86_64
+
diff --git a/project/venv/lib/python2.7/site-packages/PyYAML-5.1.dist-info/top_level.txt b/project/venv/lib/python2.7/site-packages/PyYAML-5.1.dist-info/top_level.txt
new file mode 100644
index 0000000..e6475e9
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/PyYAML-5.1.dist-info/top_level.txt
@@ -0,0 +1,2 @@
+_yaml
+yaml
diff --git a/project/venv/lib/python2.7/site-packages/TextGrid-1.4.dist-info/DESCRIPTION.rst b/project/venv/lib/python2.7/site-packages/TextGrid-1.4.dist-info/DESCRIPTION.rst
new file mode 100644
index 0000000..e118723
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/TextGrid-1.4.dist-info/DESCRIPTION.rst
@@ -0,0 +1,3 @@
+UNKNOWN
+
+
diff --git a/project/venv/lib/python2.7/site-packages/TextGrid-1.4.dist-info/INSTALLER b/project/venv/lib/python2.7/site-packages/TextGrid-1.4.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/TextGrid-1.4.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/project/venv/lib/python2.7/site-packages/TextGrid-1.4.dist-info/METADATA b/project/venv/lib/python2.7/site-packages/TextGrid-1.4.dist-info/METADATA
new file mode 100644
index 0000000..dff5bc5
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/TextGrid-1.4.dist-info/METADATA
@@ -0,0 +1,14 @@
+Metadata-Version: 2.0
+Name: TextGrid
+Version: 1.4
+Summary: Praat TextGrid manipulation.
+Home-page: UNKNOWN
+Author: Kyle Gorman et al.
+Author-email: kylebgorman@gmail.com
+License: UNKNOWN
+Description-Content-Type: UNKNOWN
+Platform: UNKNOWN
+
+UNKNOWN
+
+
diff --git a/project/venv/lib/python2.7/site-packages/TextGrid-1.4.dist-info/RECORD b/project/venv/lib/python2.7/site-packages/TextGrid-1.4.dist-info/RECORD
new file mode 100644
index 0000000..f28014d
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/TextGrid-1.4.dist-info/RECORD
@@ -0,0 +1,13 @@
+TextGrid-1.4.dist-info/DESCRIPTION.rst,sha256=OCTuuN6LcWulhHS3d5rfjdsQtW22n7HENFRh6jC6ego,10
+TextGrid-1.4.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+TextGrid-1.4.dist-info/METADATA,sha256=IFHsDhZZFuOJdumPEBkSTUqAlDSJMDUJW05UhdRiGjg,250
+TextGrid-1.4.dist-info/RECORD,,
+TextGrid-1.4.dist-info/WHEEL,sha256=9Z5Xm-eel1bTS7e6ogYiKz0zmPEqDwIypurdHN1hR40,116
+TextGrid-1.4.dist-info/metadata.json,sha256=Nue5ZSguO37SYRnPFE8MCpWaMwa5J-nxt6us3cS4MHc,372
+TextGrid-1.4.dist-info/top_level.txt,sha256=XHJGg7FS4QKvUdqLkmC2SmknY5vhVIxj0ooAPxOaMuY,9
+textgrid/__init__.py,sha256=k-l1bvhZB1An_ijRtB4kbjWCuBWjxlmzgeb-9ybsyZ4,79
+textgrid/__init__.pyc,,
+textgrid/exceptions.py,sha256=pUSc3V9Vc4dzReiBF1lE7ANOjIijvkSDYKQYyFCajus,43
+textgrid/exceptions.pyc,,
+textgrid/textgrid.py,sha256=eVCsld7TCG_BcBS8RhsEtWf-273CmqW96-ddyuZN2v4,33505
+textgrid/textgrid.pyc,,
diff --git a/project/venv/lib/python2.7/site-packages/TextGrid-1.4.dist-info/WHEEL b/project/venv/lib/python2.7/site-packages/TextGrid-1.4.dist-info/WHEEL
new file mode 100644
index 0000000..ab4a09e
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/TextGrid-1.4.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.29.0)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/project/venv/lib/python2.7/site-packages/TextGrid-1.4.dist-info/metadata.json b/project/venv/lib/python2.7/site-packages/TextGrid-1.4.dist-info/metadata.json
new file mode 100644
index 0000000..301f94e
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/TextGrid-1.4.dist-info/metadata.json
@@ -0,0 +1 @@
+{"description_content_type": "UNKNOWN", "extensions": {"python.details": {"contacts": [{"email": "kylebgorman@gmail.com", "name": "Kyle Gorman et al.", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}}}, "generator": "bdist_wheel (0.29.0)", "metadata_version": "2.0", "name": "TextGrid", "summary": "Praat TextGrid manipulation.", "version": "1.4"}
\ No newline at end of file
diff --git a/project/venv/lib/python2.7/site-packages/TextGrid-1.4.dist-info/top_level.txt b/project/venv/lib/python2.7/site-packages/TextGrid-1.4.dist-info/top_level.txt
new file mode 100644
index 0000000..62dbf12
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/TextGrid-1.4.dist-info/top_level.txt
@@ -0,0 +1 @@
+textgrid
diff --git a/project/venv/lib/python2.7/site-packages/easy_install.py b/project/venv/lib/python2.7/site-packages/easy_install.py
new file mode 100644
index 0000000..d87e984
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/easy_install.py
@@ -0,0 +1,5 @@
+"""Run the EasyInstall command"""
+
+if __name__ == '__main__':
+ from setuptools.command.easy_install import main
+ main()
diff --git a/project/venv/lib/python2.7/site-packages/easy_install.pyc b/project/venv/lib/python2.7/site-packages/easy_install.pyc
new file mode 100644
index 0000000..33e67ee
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/easy_install.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy-1.16.2.dist-info/INSTALLER b/project/venv/lib/python2.7/site-packages/numpy-1.16.2.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy-1.16.2.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/project/venv/lib/python2.7/site-packages/numpy-1.16.2.dist-info/METADATA b/project/venv/lib/python2.7/site-packages/numpy-1.16.2.dist-info/METADATA
new file mode 100644
index 0000000..84454d7
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy-1.16.2.dist-info/METADATA
@@ -0,0 +1,54 @@
+Metadata-Version: 2.1
+Name: numpy
+Version: 1.16.2
+Summary: NumPy is the fundamental package for array computing with Python.
+Home-page: https://www.numpy.org
+Author: Travis E. Oliphant et al.
+Maintainer: NumPy Developers
+Maintainer-email: numpy-discussion@python.org
+License: BSD
+Download-URL: https://pypi.python.org/pypi/numpy
+Platform: Windows
+Platform: Linux
+Platform: Solaris
+Platform: Mac OS-X
+Platform: Unix
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Science/Research
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved
+Classifier: Programming Language :: C
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Topic :: Software Development
+Classifier: Topic :: Scientific/Engineering
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: POSIX
+Classifier: Operating System :: Unix
+Classifier: Operating System :: MacOS
+Requires-Python: >=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*
+
+It provides:
+
+- a powerful N-dimensional array object
+- sophisticated (broadcasting) functions
+- tools for integrating C/C++ and Fortran code
+- useful linear algebra, Fourier transform, and random number capabilities
+- and much more
+
+Besides its obvious scientific uses, NumPy can also be used as an efficient
+multi-dimensional container of generic data. Arbitrary data-types can be
+defined. This allows NumPy to seamlessly and speedily integrate with a wide
+variety of databases.
+
+All NumPy wheels distributed on PyPI are BSD licensed.
+
+
+
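A tiny illustration of the array object and the linear-algebra claim, kept Python 2.7-compatible to match this venv:

```python
import numpy as np
a = np.arange(6).reshape(2, 3)   # the N-dimensional array object
print(a + 10)                    # broadcasting a scalar
print(np.dot(a, a.T))            # basic linear algebra
```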
diff --git a/project/venv/lib/python2.7/site-packages/numpy-1.16.2.dist-info/RECORD b/project/venv/lib/python2.7/site-packages/numpy-1.16.2.dist-info/RECORD
new file mode 100644
index 0000000..5931327
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy-1.16.2.dist-info/RECORD
@@ -0,0 +1,803 @@
+../../../bin/f2py,sha256=WCOexfKhCMTNtQGohYPYG2qfcYBxXcHfYqRTX9uw8x0,286
+../../../bin/f2py2,sha256=WCOexfKhCMTNtQGohYPYG2qfcYBxXcHfYqRTX9uw8x0,286
+../../../bin/f2py2.7,sha256=WCOexfKhCMTNtQGohYPYG2qfcYBxXcHfYqRTX9uw8x0,286
+numpy-1.16.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+numpy-1.16.2.dist-info/METADATA,sha256=MyfSLq4okJQpnE7_0yoqjdO3I3YvMALqZOnJdFCvr7U,1948
+numpy-1.16.2.dist-info/RECORD,,
+numpy-1.16.2.dist-info/WHEEL,sha256=M5Ujap42zjfAFnpJOoFU72TFHuBKh-JF0Rqu5vZhkVE,110
+numpy-1.16.2.dist-info/entry_points.txt,sha256=EvEfDEP2oBMNGDgk_sl6nu5nHwbMn1G_bN_aoiCI4ZY,113
+numpy-1.16.2.dist-info/top_level.txt,sha256=4J9lbBMLnAiyxatxh8iRKV5Entd_6-oqbO7pzJjMsPw,6
+numpy/.libs/libgfortran-ed201abd.so.3.0.0,sha256=xkH_25s9lTuTrEf3fE8GZK68sYSPsIlP3SJ6Qze9aIM,1023960
+numpy/.libs/libopenblasp-r0-382c8f3a.3.5.dev.so,sha256=MgisyqBW3cKbBpB8GwsyuN2JKQUJtVPllGByvSYLck4,29764696
+numpy/LICENSE.txt,sha256=75wRgMQl3M781Yx0TIDCm5QIlvSAKTqjJnit7QWANzQ,46470
+numpy/__config__.py,sha256=6JW9C97RVmv2Rqmvmsh704QBrLHxkyJpFHadrlll9LE,1554
+numpy/__config__.pyc,,
+numpy/__init__.py,sha256=6bqV6_Ot0ysgHlDh0mhaoowho85c5GC5OEtyZrScd9U,7110
+numpy/__init__.pyc,,
+numpy/_distributor_init.py,sha256=IgPkSK3H9bgjFeUfWuXhjKrgetQl5ztUW-rTyjGHK3c,331
+numpy/_distributor_init.pyc,,
+numpy/_globals.py,sha256=p8xxERZsxjGPUWV9pMY3jz75NZxDLppGeKaHbYGCDqM,2379
+numpy/_globals.pyc,,
+numpy/_pytesttester.py,sha256=eLWMwBiqamHoev8-VlmtvCaxV_gitqK7js-UkjUW4qs,6854
+numpy/_pytesttester.pyc,,
+numpy/compat/__init__.py,sha256=MHle4gJcrXh1w4SNv0mz5rbUTAjAzHnyO3rtbSW3AUo,498
+numpy/compat/__init__.pyc,,
+numpy/compat/_inspect.py,sha256=xEImUFhm4VAzT2LJj2Va_yDAHJsdy0RwSi1JwOOhykU,7513
+numpy/compat/_inspect.pyc,,
+numpy/compat/py3k.py,sha256=pBEgOIEsaycTciDcN33Dt2WdNA8c3JBtxkSuzA2g8TQ,6663
+numpy/compat/py3k.pyc,,
+numpy/compat/setup.py,sha256=REJcwNU7EbfwBFS1FHazGJcUhh50_5gYttr3BSczCiM,382
+numpy/compat/setup.pyc,,
+numpy/compat/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+numpy/compat/tests/__init__.pyc,,
+numpy/compat/tests/test_compat.py,sha256=KI-7Ru3Ia5TwGBmBWTM3Qjq6G_CFOzX1uId--9MecXo,691
+numpy/compat/tests/test_compat.pyc,,
+numpy/conftest.py,sha256=KLI46jUH5d7LGWeUI5WuGbAL1_2TukfRWKnlue4Ncrk,1686
+numpy/conftest.pyc,,
+numpy/core/__init__.py,sha256=_QCag_dScAatINZX8mlDcGdabAemkT8iq9GiWWgWisE,5928
+numpy/core/__init__.pyc,,
+numpy/core/_add_newdocs.py,sha256=I_ndaFJhI6iD5ySeC2HGpGRdPgv5JIP2vd6_Wo2tbjo,203038
+numpy/core/_add_newdocs.pyc,,
+numpy/core/_aliased_types.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+numpy/core/_aliased_types.pyc,,
+numpy/core/_dtype.py,sha256=rvaeOf-ypqhRrb4_Aljt2CXX6uZrX0STQ0pQwlXTKv4,9688
+numpy/core/_dtype.pyc,,
+numpy/core/_dtype_ctypes.py,sha256=srVPSI6kJvXjzIvkBQDU1itAfT-vCZKeyjgCF3yC-6Q,3448
+numpy/core/_dtype_ctypes.pyc,,
+numpy/core/_dummy.so,sha256=6ymA-LVfcdEzHE71a6qp91sQ-WTxcqP2rx3vXPIsapM,15911
+numpy/core/_internal.py,sha256=U3CSOXeUC79XPV-ZaxlhniUt3q5iFJXlQVFeIcwcsY4,27779
+numpy/core/_internal.pyc,,
+numpy/core/_methods.py,sha256=3QDUJ2FAN317yU5fE6lPdFgBVrnBaiYnUkuTu1vWmoU,5083
+numpy/core/_methods.pyc,,
+numpy/core/_multiarray_tests.so,sha256=Yb9JupRaApkHMPry22_gupVhAqw2wWk0D3Mmo4b9OY4,546243
+numpy/core/_multiarray_umath.so,sha256=yDVY4aDlT98Wlvgo1tYgO3L51DSaQfgZFRNH2inY1IE,19667840
+numpy/core/_operand_flag_tests.so,sha256=FkVNAuX0uAk6KjUMNJiSURvBXs33ytk_HPE-dd_zrjs,29842
+numpy/core/_rational_tests.so,sha256=VFcpJ5c5MPOkT8HbLcHVO2_Juz0nGM14R7byTD5cVjk,261212
+numpy/core/_string_helpers.py,sha256=NGGGhaFdU5eGiUAj3GTIBoOgWs4r9aTNlsE2r9NgX6Q,2855
+numpy/core/_string_helpers.pyc,,
+numpy/core/_struct_ufunc_tests.so,sha256=1r1lYPRdWLixFyn7aczDqsLn2X_EOMl_n5lMp9zkLcI,30069
+numpy/core/_type_aliases.py,sha256=FA2Pz5OKqcLl1QKLJNu-ETHIzQ1ii3LH5pSdHhZkfZA,9181
+numpy/core/_type_aliases.pyc,,
+numpy/core/_umath_tests.so,sha256=TddxXbUXnLoLhXFUJx9Tn0WZi4sY5Fb9OG6Zxi0swyM,82716
+numpy/core/arrayprint.py,sha256=9-GT05iiKZvMAjhoN1AUraQ1BzRaWmmq5_n0eWPp9L4,60310
+numpy/core/arrayprint.pyc,,
+numpy/core/cversions.py,sha256=ukYNpkei0Coi7DOcbroXuDoXc6kl5odxmcy_39pszA0,413
+numpy/core/cversions.pyc,,
+numpy/core/defchararray.py,sha256=gRmZlrryuLgNQLsFjMzZz4X2hFnTZzzqfkpiTWwvthI,71118
+numpy/core/defchararray.pyc,,
+numpy/core/einsumfunc.py,sha256=-P82bltMMGjUCRpYjJYohGdPpxLExucGlWJLMA7XxDw,51207
+numpy/core/einsumfunc.pyc,,
+numpy/core/fromnumeric.py,sha256=gza7xG7bWMWNtrQ0mV4719FWHpOAmk27LnhXsdQ5Ics,109555
+numpy/core/fromnumeric.pyc,,
+numpy/core/function_base.py,sha256=_1hlf2CCd-eXq5p7sJz56RhzrqyMmzBQjfti-i-uhbQ,16336
+numpy/core/function_base.pyc,,
+numpy/core/generate_numpy_api.py,sha256=0JBYTvekUeJyhp7QMKtWJSK-L6lVNhev16y0F2qX2pU,7470
+numpy/core/generate_numpy_api.pyc,,
+numpy/core/getlimits.py,sha256=zGUdeXCw8GAWly8MCTL6xWEyoBDszviDpc47SbS_BhY,18936
+numpy/core/getlimits.pyc,,
+numpy/core/include/numpy/__multiarray_api.h,sha256=MS8l193p6aGZSSuV6ube6F9Ms7Wsn5TKVv9WHM7eCkw,60958
+numpy/core/include/numpy/__ufunc_api.h,sha256=szHiF_4UY3EY8wH0q1YW9UmoakOHQTsHog75MJwEyTg,12143
+numpy/core/include/numpy/_neighborhood_iterator_imp.h,sha256=hNiUJ3gmJRxdjByk5R5jmLeBKpNfaP_29KLHFuTrSIA,1861
+numpy/core/include/numpy/_numpyconfig.h,sha256=bDiTLQ972ZWQBEpx6OM8riS64nSAelKa2kIimnXm_Ss,1010
+numpy/core/include/numpy/arrayobject.h,sha256=SXj-2avTHV8mNWvv7sOYHLKkRKcafDG7_HNpQNot1GE,164
+numpy/core/include/numpy/arrayscalars.h,sha256=vC7QCznlT8vkyvxbIh4QNwi1LR7UkP7GJ1j_0ZiJa1E,3509
+numpy/core/include/numpy/halffloat.h,sha256=ohvyl3Kz3mB1hW3MRzxwPDH-0L9WWM_eKhvYLjtT_2w,1878
+numpy/core/include/numpy/multiarray_api.txt,sha256=Panvwe-mLDLFw9WU90x2M7nqrCE99JzBD186Xa5R4po,56385
+numpy/core/include/numpy/ndarrayobject.h,sha256=ZVCR5RE1W4QUJ8X6jeai-9gwWvgDkImR8ZEH1XI2wl0,11507
+numpy/core/include/numpy/ndarraytypes.h,sha256=eHZA7lbctLPjUPdyXu6ITCnyOmlt88IIG2rZkjs4DAA,64719
+numpy/core/include/numpy/noprefix.h,sha256=YE-lWegAdZKI5lf44AW5jiWbnmO6hircWzj_WMFrLT4,6786
+numpy/core/include/numpy/npy_1_7_deprecated_api.h,sha256=LLeZKLuJADU3RDfT04pu5FCxCBU5cEzY5Q9phR_HL78,4715
+numpy/core/include/numpy/npy_3kcompat.h,sha256=exFgMT6slmo2Zg3bFsY3mKLUrrkg3KU_66gUmu5IYKk,14666
+numpy/core/include/numpy/npy_common.h,sha256=FIVNq2pSdIRJsoi56GAruahhfp1OwafweqCDhakUc8w,37277
+numpy/core/include/numpy/npy_cpu.h,sha256=3frXChwN0Cxca-sAeTTOJCiZ6_2q1EuggUwqEotdXLg,3879
+numpy/core/include/numpy/npy_endian.h,sha256=HHanBydLvLC2anJJySvy6wZ_lYaC_xI6GNwT8cJ78rE,2596
+numpy/core/include/numpy/npy_interrupt.h,sha256=Eyddk806h30jxgymbr44b7eIZKrHXtNzXpPtUPp2Ng8,3439
+numpy/core/include/numpy/npy_math.h,sha256=AeaXjX76YzIMT67EdZMnjG1--x61UI2htuGtGwgfA24,18838
+numpy/core/include/numpy/npy_no_deprecated_api.h,sha256=X-wRYdpuwIuerTnBblKjR7Dqsv8rqxn01RFLVWUHvi8,567
+numpy/core/include/numpy/npy_os.h,sha256=cEvEvpD92EeFjsjRelw1dXJaHYL-0yPJDuz3VeSJs4E,817
+numpy/core/include/numpy/numpyconfig.h,sha256=J5BLHoCyhe383tIM4YriMgYDjOPC4xWzRvqBPyNCTOE,1207
+numpy/core/include/numpy/old_defines.h,sha256=7eiZoi7JrdVT9LXKCoeta5AoIncGa98GcVlWqDrLjwk,6306
+numpy/core/include/numpy/oldnumeric.h,sha256=Yo-LiSzVfDK2YyhlH41ff4gS0m-lv8XjI4JcAzpdy94,708
+numpy/core/include/numpy/ufunc_api.txt,sha256=2d31yVD80vKEgf5Pr5JtDjnVjc2DLfiXRWQuq7KT5wc,6889
+numpy/core/include/numpy/ufuncobject.h,sha256=ocjHj2QCTYkbxIdSmPi-2k3vzKNJ96oB7giwiWFS2i0,13051
+numpy/core/include/numpy/utils.h,sha256=KqJzngAvarYV3oZQu5fY0ARPVihUP7FsZjdljysaSUk,729
+numpy/core/info.py,sha256=SjDs9EfOswEy-ABgUr9f09v83sUdhmwFXRlaZbOGCnA,4692
+numpy/core/info.pyc,,
+numpy/core/lib/libnpymath.a,sha256=b-gdLaZupHyZrHs-1v0f7TG5QQrmR0RwgJjmiGO3Y0o,350538
+numpy/core/lib/npy-pkg-config/mlib.ini,sha256=_LsWV1eStNqwhdiYPa2538GL46dnfVwT4MrI1zbsoFw,147
+numpy/core/lib/npy-pkg-config/npymath.ini,sha256=kamUNrYKAmXqQa8BcNv7D5sLqHh6bnChM0_5rZCsTfY,360
+numpy/core/machar.py,sha256=StiB_u3QIWj55RQASESDLjv_8650DE8vuCd7evhlmDI,10854
+numpy/core/machar.pyc,,
+numpy/core/memmap.py,sha256=4SEtSehRX8SJtGseMP1hm4LgrRR4oLa20wlRLUy4aJU,11612
+numpy/core/memmap.pyc,,
+numpy/core/multiarray.py,sha256=26mdC_rn84U2WwIjf6mZX7rW_YugkEHRIDZBkKrRImQ,50606
+numpy/core/multiarray.pyc,,
+numpy/core/numeric.py,sha256=OaLQxu1NQmiehpC5BHRLMBIKXUqiqmDTaqhIgvN8K5M,92560
+numpy/core/numeric.pyc,,
+numpy/core/numerictypes.py,sha256=8esH4zpSWgjUnfpe9CK0D5MmBPojh6iWLGEDza9Pi2E,17849
+numpy/core/numerictypes.pyc,,
+numpy/core/overrides.py,sha256=OSkSrhC7dl63cK9Pr0qt4X5rMsB025MNoPV-_o15VNM,6658
+numpy/core/overrides.pyc,,
+numpy/core/records.py,sha256=6N9oqz53J_5eGPsR9DxltkYxJdzraA2dJil0xBiqJm4,30418
+numpy/core/records.pyc,,
+numpy/core/setup.py,sha256=3vqfmmG_JLENcuBVmjzpiznKMciwKcd1a6B0MOE7z2I,41384
+numpy/core/setup.pyc,,
+numpy/core/setup_common.py,sha256=8Ywhbj-uud98QbEgIZEQGvmpD6S2Ve-dlOYqb_9lkmw,16571
+numpy/core/setup_common.pyc,,
+numpy/core/shape_base.py,sha256=4Sjp5Jjs5BfLJ0rU6PUrxl-hNk6ye5IcrpbUNbD667U,28273
+numpy/core/shape_base.pyc,,
+numpy/core/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+numpy/core/tests/__init__.pyc,,
+numpy/core/tests/_locales.py,sha256=GQro3bha8c5msgQyvNzmDUrNwqS2cGkKKuN4gg4c6tI,2266
+numpy/core/tests/_locales.pyc,,
+numpy/core/tests/data/astype_copy.pkl,sha256=lWSzCcvzRB_wpuRGj92spGIw-rNPFcd9hwJaRVvfWdk,716
+numpy/core/tests/data/recarray_from_file.fits,sha256=NA0kliz31FlLnYxv3ppzeruONqNYkuEvts5wzXEeIc4,8640
+numpy/core/tests/test_abc.py,sha256=cpIqt3VFBZLHbuNpO4NuyCGgd--k1zij5aasu7FV77I,2402
+numpy/core/tests/test_abc.pyc,,
+numpy/core/tests/test_api.py,sha256=k7pN6IGolFQHBT7CmaRZHJqR59amWk6QcVK4RdqQkKc,18902
+numpy/core/tests/test_api.pyc,,
+numpy/core/tests/test_arrayprint.py,sha256=PPUppm9m6cPx2mjU0lQevTdnLSq0_6JIe8LyumN3ujM,34701
+numpy/core/tests/test_arrayprint.pyc,,
+numpy/core/tests/test_datetime.py,sha256=1h-LfflxFQCvWsZcQwK8Ry8mWW0mziuIQ1n-NX1Qqtc,101949
+numpy/core/tests/test_datetime.pyc,,
+numpy/core/tests/test_defchararray.py,sha256=L5EoOBTZVrRU1Vju5IhY8BSUlBOGPzEViKJwyQSlpXo,25481
+numpy/core/tests/test_defchararray.pyc,,
+numpy/core/tests/test_deprecations.py,sha256=BkNax57OUQmuEvqnR4Lv2p2y0UNpXPHhsGAR6a4qjmQ,21435
+numpy/core/tests/test_deprecations.pyc,,
+numpy/core/tests/test_dtype.py,sha256=lPGPQGw4_6ZPn4ns_7nmJNumlvUv_DALMvm8pymaqWY,43520
+numpy/core/tests/test_dtype.pyc,,
+numpy/core/tests/test_einsum.py,sha256=bg9t1Hu_z0G8xGWqJuIOqS21QMR76xdQl8xQhlejXPc,44090
+numpy/core/tests/test_einsum.pyc,,
+numpy/core/tests/test_errstate.py,sha256=5vN5Xiv9cILQ0j62P7DrZPvTAiAddRY4EfoCUGQqgMk,1335
+numpy/core/tests/test_errstate.pyc,,
+numpy/core/tests/test_extint128.py,sha256=-0zEInkai1qRhXI0bdHCguU_meD3s6Td4vUIBwirYQI,5709
+numpy/core/tests/test_extint128.pyc,,
+numpy/core/tests/test_function_base.py,sha256=L7fBYrSEjcZdVDF1I811LWwXRAGxfcOv8b68VDxZGQo,12977
+numpy/core/tests/test_function_base.pyc,,
+numpy/core/tests/test_getlimits.py,sha256=2fBK7Slo67kP6bThcN9bOKmeX9gGPQVUE17jGVydoXk,4427
+numpy/core/tests/test_getlimits.pyc,,
+numpy/core/tests/test_half.py,sha256=Hnlj7T4-kAKT-2gPa7-vNrbOOJ6qfZoaq0jW4CbCtRs,22300
+numpy/core/tests/test_half.pyc,,
+numpy/core/tests/test_indexerrors.py,sha256=0Ku3Sy5jcaE3D2KsyDrFTvgQzMv2dyWja3hc4t5-n_k,4857
+numpy/core/tests/test_indexerrors.pyc,,
+numpy/core/tests/test_indexing.py,sha256=i0P9N0vV6RKU_n-sT8whMeG-6ydRVKAREg-b3veKKXM,50602
+numpy/core/tests/test_indexing.pyc,,
+numpy/core/tests/test_item_selection.py,sha256=pMjd_8v5YC4WVjLLIrhmisPOG_DMw4j3YGEZ-UY1kOA,3599
+numpy/core/tests/test_item_selection.pyc,,
+numpy/core/tests/test_longdouble.py,sha256=Qzlzxtk0NTXUq2loU_RPFVruc3hwjmbOJy-VatXr0F0,6314
+numpy/core/tests/test_longdouble.pyc,,
+numpy/core/tests/test_machar.py,sha256=FrKeGhC7j-z9tApS_uI1E0DUkzieKIdUHMQPfCSM0t8,1141
+numpy/core/tests/test_machar.pyc,,
+numpy/core/tests/test_mem_overlap.py,sha256=AyBz4pm7HhTDdlW2pq9FR1AO0E5QAYdKpBoWbOdSrco,29505
+numpy/core/tests/test_mem_overlap.pyc,,
+numpy/core/tests/test_memmap.py,sha256=mYreq9HqWDz5Z9r6mmdvz4iB6VnUR61ytVCzzd3mhc0,7316
+numpy/core/tests/test_memmap.pyc,,
+numpy/core/tests/test_multiarray.py,sha256=2mzeCjnNFSIXEa0ZAp0myFEcQ31ENi4DHH8ERNI8Tn4,303271
+numpy/core/tests/test_multiarray.pyc,,
+numpy/core/tests/test_nditer.py,sha256=e2vzCwKhLjcjgE4zPv7YgFpCE78NWdDJpL__fCR-prU,112097
+numpy/core/tests/test_nditer.pyc,,
+numpy/core/tests/test_numeric.py,sha256=EwIsuf6vPm9YsbvmoHOklKEgGuRuf8hBs003FPk-Seo,102550
+numpy/core/tests/test_numeric.pyc,,
+numpy/core/tests/test_numerictypes.py,sha256=5NMUrwVqBH_qUFk-62QhUxMwg7FPPdm9hcPdftE_4zo,18526
+numpy/core/tests/test_numerictypes.pyc,,
+numpy/core/tests/test_overrides.py,sha256=qRa8AOOBnejS6ZfW7vP3g-jioFdIwBBJ3hYrPZXy7ks,13196
+numpy/core/tests/test_overrides.pyc,,
+numpy/core/tests/test_print.py,sha256=Q53dqbjQQIlCzRp_1ZY0A-ptP7FlbBZVPeMeMLX0cVg,6876
+numpy/core/tests/test_print.pyc,,
+numpy/core/tests/test_records.py,sha256=jFWlwrYqBGKSdmtq55Qqrg8dg-3wvcagr9KRXCC2wMA,17734
+numpy/core/tests/test_records.pyc,,
+numpy/core/tests/test_regression.py,sha256=DObYUilkZkErqk67UGsRfhgaNLma7vAZhS73uFJjmaE,87021
+numpy/core/tests/test_regression.pyc,,
+numpy/core/tests/test_scalar_ctors.py,sha256=kjyYllJHyhMQGT49Xbjjc2tuFHXcQIM-PAZExMWczq8,2294
+numpy/core/tests/test_scalar_ctors.pyc,,
+numpy/core/tests/test_scalarbuffer.py,sha256=0U9U95ogctbQb9ggbmgiQ2UmzXEpO6mbXnn4ciuAO50,3561
+numpy/core/tests/test_scalarbuffer.pyc,,
+numpy/core/tests/test_scalarinherit.py,sha256=iP5lLn-z1vtongCvDt-JBnHwNWNREx4ovu12Iy-mGiA,1838
+numpy/core/tests/test_scalarinherit.pyc,,
+numpy/core/tests/test_scalarmath.py,sha256=Z14wE1NY6S3UEBVIlNj3KfOsuteXSZliHZKTqnU-nWM,27257
+numpy/core/tests/test_scalarmath.pyc,,
+numpy/core/tests/test_scalarprint.py,sha256=AissnDOK_noK09FFQp-oioDk3WjgKkTuGf1-vUWDfFg,15469
+numpy/core/tests/test_scalarprint.pyc,,
+numpy/core/tests/test_shape_base.py,sha256=8NAbKisPKVc3z9CdPr_oCVRDWjjvui_sAGLCXweTkR4,24351
+numpy/core/tests/test_shape_base.pyc,,
+numpy/core/tests/test_ufunc.py,sha256=tkfbCdwV_tDSLcZl79CsTev2cjBBP-o3bTwrOYeVjEo,77403
+numpy/core/tests/test_ufunc.pyc,,
+numpy/core/tests/test_umath.py,sha256=AOt3f7EnCM8RZJ8enlkgNQ6IT8SkrYx-a98g5Lq46cM,108290
+numpy/core/tests/test_umath.pyc,,
+numpy/core/tests/test_umath_complex.py,sha256=QJHRggzmZw0eFoqyc83I8mdcU5ayPLI9kAmj3MUPEF8,19323
+numpy/core/tests/test_umath_complex.pyc,,
+numpy/core/tests/test_unicode.py,sha256=PvWt5NLjgwulCgXakHEKMJ2pSpTLbUWgz9dZExEcSJ8,13656
+numpy/core/tests/test_unicode.pyc,,
+numpy/core/umath.py,sha256=rdEijDCvJClpWvMFCUVRUlrAQQ_lq4jeNJla-hTfpFU,1919
+numpy/core/umath.pyc,,
+numpy/core/umath_tests.py,sha256=Sr6VQTbH-sOMlXy-tg1-Unht7MKaaV4wtAYR6mQYNbU,455
+numpy/core/umath_tests.pyc,,
+numpy/ctypeslib.py,sha256=-uBRC3nl-33m3g3cPEJ_yh73Z191ErweCnoWDYGWcVw,17206
+numpy/ctypeslib.pyc,,
+numpy/distutils/__config__.py,sha256=6JW9C97RVmv2Rqmvmsh704QBrLHxkyJpFHadrlll9LE,1554
+numpy/distutils/__config__.pyc,,
+numpy/distutils/__init__.py,sha256=b93HZiRpHfSC9E-GPiXk6PWDwQ3STJ4rlzvx6PhHH1k,1092
+numpy/distutils/__init__.pyc,,
+numpy/distutils/__version__.py,sha256=SSRZKvGfvg_GpYbXWtI5gaTK0NGW9nBBCyNghaaXBh8,151
+numpy/distutils/__version__.pyc,,
+numpy/distutils/_shell_utils.py,sha256=zKjy56kw6erYPK71h-afpX9HYn2ZeQHSMpvvYiVwOu0,2603
+numpy/distutils/_shell_utils.pyc,,
+numpy/distutils/ccompiler.py,sha256=ytnf5_oaWfnssvfJQr0aXntNDXUcvZLCasOnwAiYKA0,29537
+numpy/distutils/ccompiler.pyc,,
+numpy/distutils/command/__init__.py,sha256=l5r9aYwIEq1D-JJc8WFUxABk6Ip28FpRK_ok7wSLRZE,1098
+numpy/distutils/command/__init__.pyc,,
+numpy/distutils/command/autodist.py,sha256=1oytviCdsUjm3YxLLVePvWEqxyz0eDRRqvfokDm2sXQ,2048
+numpy/distutils/command/autodist.pyc,,
+numpy/distutils/command/bdist_rpm.py,sha256=rhhIyFzkd5NGi6lZaft44EBPZB3zZFRDc75klJYnbw8,775
+numpy/distutils/command/bdist_rpm.pyc,,
+numpy/distutils/command/build.py,sha256=6Q9bDubq5WfwR1K5woDFXed692szD0Rq-5Ckv2xpoK4,1618
+numpy/distutils/command/build.pyc,,
+numpy/distutils/command/build_clib.py,sha256=_Y3upI_slekgMk2CI2vplOXj5p1_aEHa-F9_nJ0HOgg,13389
+numpy/distutils/command/build_clib.pyc,,
+numpy/distutils/command/build_ext.py,sha256=QXS_Z1vrpvTrTjUb6m9n1upo9zljo4o5yd27Su5LJfQ,25949
+numpy/distutils/command/build_ext.pyc,,
+numpy/distutils/command/build_py.py,sha256=7TBGLz0va0PW6sEX-aUjsXdzvhuSbJGgIrMim1JTwu4,1210
+numpy/distutils/command/build_py.pyc,,
+numpy/distutils/command/build_scripts.py,sha256=ze19jHBhC3JggKLbL9wgs9I3mG7ls-V2NbykvleNwgQ,1731
+numpy/distutils/command/build_scripts.pyc,,
+numpy/distutils/command/build_src.py,sha256=aUl2Zf8WktMCr8f1u6YoAlblNSVGjv-lz_7yTfOKC80,30908
+numpy/distutils/command/build_src.pyc,,
+numpy/distutils/command/config.py,sha256=DxvvFqUtKPCXzHCfC2DOQcMBm67YkRdXyWeBa3bYFQE,19094
+numpy/distutils/command/config.pyc,,
+numpy/distutils/command/config_compiler.py,sha256=SKJTEk_Y_Da-dVYOHAdf4c3yXxjlE1dsr-hJxY0m0PU,4435
+numpy/distutils/command/config_compiler.pyc,,
+numpy/distutils/command/develop.py,sha256=nYM5yjhKtGKh_3wZwrvEQBLYHKldz64aU-0iSycSkXA,641
+numpy/distutils/command/develop.pyc,,
+numpy/distutils/command/egg_info.py,sha256=pdiCFQiQuIpf_xmVk9Njl7iowY9CxGn9KRbU-A9eBfg,987
+numpy/distutils/command/egg_info.pyc,,
+numpy/distutils/command/install.py,sha256=yBj3NM6sctAbG3QR5Y4qPs7YjxpW7EoKeMPEkNWf2qU,3127
+numpy/distutils/command/install.pyc,,
+numpy/distutils/command/install_clib.py,sha256=6tUO3FbF_b_e_Ly31qod9rB4yHA2z8m2mh6qry1a4yk,1315
+numpy/distutils/command/install_clib.pyc,,
+numpy/distutils/command/install_data.py,sha256=7iWTw93ty2sBPwHwg_EEhgQhZSZe6SsKdfTS9RbUR9A,914
+numpy/distutils/command/install_data.pyc,,
+numpy/distutils/command/install_headers.py,sha256=NbZwt-Joo80z_1TfxA-mIWXm2L9Mmh4ZLht7HAuveoo,985
+numpy/distutils/command/install_headers.pyc,,
+numpy/distutils/command/sdist.py,sha256=tHmlb0RzD8x04dswPXEua9H_b6GuHWY1V3hYkwJDKvA,799
+numpy/distutils/command/sdist.pyc,,
+numpy/distutils/compat.py,sha256=xzkW8JgJgGTmye34QCYTIkLfsXBvmPu4tvgCwXNdiU0,218
+numpy/distutils/compat.pyc,,
+numpy/distutils/conv_template.py,sha256=5VAAMSjzrSe_mCxzMHVW6GQZ0ATqQr5N9EFYhuTeQvg,9702
+numpy/distutils/conv_template.pyc,,
+numpy/distutils/core.py,sha256=9GNNyWDTCqfnD7Jp2tzp9vOBVyeJmF8lsgv_xdlt59g,8230
+numpy/distutils/core.pyc,,
+numpy/distutils/cpuinfo.py,sha256=AHJuQeg78_P5EReO1kLd-MAohvB-GfV8zuRh7F8hltI,23015
+numpy/distutils/cpuinfo.pyc,,
+numpy/distutils/exec_command.py,sha256=laMoxZ17D5I0cnkUce94wpfgTl1j3xWHn_A_jisdcu8,10795
+numpy/distutils/exec_command.pyc,,
+numpy/distutils/extension.py,sha256=q_NjgW-sOoeEBbeSEJwFh411mTgsF7BzGYso61Wf0qg,2967
+numpy/distutils/extension.pyc,,
+numpy/distutils/fcompiler/__init__.py,sha256=v3zk6W_xQXCI5H00aVBYDi5IgSug7zrGQKxpl74Qs_k,40154
+numpy/distutils/fcompiler/__init__.pyc,,
+numpy/distutils/fcompiler/absoft.py,sha256=AKbj5uGr8dpGDLzRIJbdUnXXAtF_5k4JqnqwTWvy-tQ,5565
+numpy/distutils/fcompiler/absoft.pyc,,
+numpy/distutils/fcompiler/compaq.py,sha256=djulalEdV6b58ofcEw14Uoq5-aNgblJMqLIzNwmJ2SE,4109
+numpy/distutils/fcompiler/compaq.pyc,,
+numpy/distutils/fcompiler/environment.py,sha256=A6AdFm6GwL5znym5qito1o3brZEgDTJs3AeCRUxu0fA,3309
+numpy/distutils/fcompiler/environment.pyc,,
+numpy/distutils/fcompiler/g95.py,sha256=K68RRAvOvyKoh-jsD9J4ZDsHltrGnJ_AllxULhy6iOE,1396
+numpy/distutils/fcompiler/g95.pyc,,
+numpy/distutils/fcompiler/gnu.py,sha256=oHipJDyfisSK9_Kdkv1Av8hDHY3UbLALgWfBO7cXkPA,20804
+numpy/distutils/fcompiler/gnu.pyc,,
+numpy/distutils/fcompiler/hpux.py,sha256=xpNfy7vCKWPnJ5M3JPnjMAewKBAfKN5hFX3hvEL2zaM,1419
+numpy/distutils/fcompiler/hpux.pyc,,
+numpy/distutils/fcompiler/ibm.py,sha256=66gCrBbbVvqu_LvdX7a9MA15NqNIY2DXPOPUFrU1zRc,3595
+numpy/distutils/fcompiler/ibm.pyc,,
+numpy/distutils/fcompiler/intel.py,sha256=WlsBtvZnLpFke7oTpMCDYFlccNSUWWkB2p422iwQURU,6861
+numpy/distutils/fcompiler/intel.pyc,,
+numpy/distutils/fcompiler/lahey.py,sha256=pJ0-xgtYwyYXgt8JlN8PFeYYEWB3vOmFkNx6UUFXzuM,1393
+numpy/distutils/fcompiler/lahey.pyc,,
+numpy/distutils/fcompiler/mips.py,sha256=IxLojWR1oi0VW93PxPpHQXRwZcYffD1dunllQW2w19A,1780
+numpy/distutils/fcompiler/mips.pyc,,
+numpy/distutils/fcompiler/nag.py,sha256=eiTvBopdCgVh5-HDTryVbRrYvf4r_Sqse1mruTt5Blo,2608
+numpy/distutils/fcompiler/nag.pyc,,
+numpy/distutils/fcompiler/none.py,sha256=N6adoFAf8inIQfCDEBzK5cGI3hLIWWpHmQXux8iJDfA,824
+numpy/distutils/fcompiler/none.pyc,,
+numpy/distutils/fcompiler/pathf95.py,sha256=Xf1JMB30PDSoNpA1Y-vKPRBeNO0XfSi0dvVQvvdjfUQ,1127
+numpy/distutils/fcompiler/pathf95.pyc,,
+numpy/distutils/fcompiler/pg.py,sha256=G0uNPfedmbkYWfChg1UbxBKqo25RenzSVJN1BUtRDw0,4232
+numpy/distutils/fcompiler/pg.pyc,,
+numpy/distutils/fcompiler/sun.py,sha256=21DQ6Rprr9rEp4pp7Np8kCwOc0Xfqdxa1iX0O-yPJPM,1643
+numpy/distutils/fcompiler/sun.pyc,,
+numpy/distutils/fcompiler/vast.py,sha256=LJ21-WIJsiquLtjdDaNsJqblwN5wuM2FZsYl1R40vN8,1733
+numpy/distutils/fcompiler/vast.pyc,,
+numpy/distutils/from_template.py,sha256=671F-qa8R1gbJUe1tCZFjw64K7J98ZnfeSV1HvWbZas,7979
+numpy/distutils/from_template.pyc,,
+numpy/distutils/info.py,sha256=lNxUhbJnzWjA47P2I_9NW-tuVrjGzL62jHDlQJ3pp6E,157
+numpy/distutils/info.pyc,,
+numpy/distutils/intelccompiler.py,sha256=1qzr6PMxi0UkR0NUY3rt3gqww9GwJ-Gbe91yxQKlieU,4291
+numpy/distutils/intelccompiler.pyc,,
+numpy/distutils/lib2def.py,sha256=RWD0EpuUHoxIuc9VyyDCH2d73jgsdGG2PBKVisanlVU,3502
+numpy/distutils/lib2def.pyc,,
+numpy/distutils/line_endings.py,sha256=aBO2e754iin4Ylo7FNwlBg6nPudXMnQZYdhVhf-E3aA,2053
+numpy/distutils/line_endings.pyc,,
+numpy/distutils/log.py,sha256=yHzdtNdTg6YtvO50Hu-Le5WJ7Typ2TvaCYabelTaUO0,2745
+numpy/distutils/log.pyc,,
+numpy/distutils/mingw/gfortran_vs2003_hack.c,sha256=cbsN3Lk9Hkwzr9c-yOP2xEBg1_ml1X7nwAMDWxGjzc8,77
+numpy/distutils/mingw32ccompiler.py,sha256=4fU0Qe_BcIZOFnjwy423AfEHXVepbyayWu89-UxDGGY,25178
+numpy/distutils/mingw32ccompiler.pyc,,
+numpy/distutils/misc_util.py,sha256=vUYKvhcqAd10VZOCm0Gw2heGfVTnndNj64BYiPbtbQA,82905
+numpy/distutils/misc_util.pyc,,
+numpy/distutils/msvc9compiler.py,sha256=TuPYjPFp3nYQSIG1goNxuOly7o3VMx-H35POMpycB3k,2258
+numpy/distutils/msvc9compiler.pyc,,
+numpy/distutils/msvccompiler.py,sha256=7EUlHbgdKBBJG3AzgE94AQeUFnj0HcD6M7_YPN7vdCs,1994
+numpy/distutils/msvccompiler.pyc,,
+numpy/distutils/npy_pkg_config.py,sha256=k3lxSOC_InRBSGddbfbvMLRTGqnE-LliNXakwdZ3AH8,13154
+numpy/distutils/npy_pkg_config.pyc,,
+numpy/distutils/numpy_distribution.py,sha256=lbnEW1OxWxC_1n2sKd0Q3fC5QnNdFuAkNAlvXF99zIQ,700
+numpy/distutils/numpy_distribution.pyc,,
+numpy/distutils/pathccompiler.py,sha256=FjNouOTL8u4gLMbJW7GdT0RlsD2nXV1_SEBNZj9QdpQ,779
+numpy/distutils/pathccompiler.pyc,,
+numpy/distutils/setup.py,sha256=q3DcCZNkK_jHsC0imocewd4uCKQWWXjkzd4nkBmkMFI,611
+numpy/distutils/setup.pyc,,
+numpy/distutils/system_info.py,sha256=vwtulLGu7HBcUtEjlBkssLCy0vxlUx_2yNBmf0eLD_U,89628
+numpy/distutils/system_info.pyc,,
+numpy/distutils/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+numpy/distutils/tests/__init__.pyc,,
+numpy/distutils/tests/test_exec_command.py,sha256=SH9RaWmUnSu8uGEjwyoTrzqoVPclTUnM6UTGSKxW8qc,7146
+numpy/distutils/tests/test_exec_command.pyc,,
+numpy/distutils/tests/test_fcompiler.py,sha256=t26JUHwJxl_m-ALPa5XABgVFrMthBw7gRKN4yYX1hYQ,2678
+numpy/distutils/tests/test_fcompiler.pyc,,
+numpy/distutils/tests/test_fcompiler_gnu.py,sha256=O57uCEHeQIS0XF8GloEas3OlaOfmIHDWEtgYS_q3x48,2218
+numpy/distutils/tests/test_fcompiler_gnu.pyc,,
+numpy/distutils/tests/test_fcompiler_intel.py,sha256=fOjd_jv0Od6bZyzFf4YpZMcnFva0OZK7yJV_4Hebb6A,1140
+numpy/distutils/tests/test_fcompiler_intel.pyc,,
+numpy/distutils/tests/test_fcompiler_nagfor.py,sha256=5-Num0A3cN7_NS3BlAgYt174S-OGOWRLL9rXtv-h_fA,1176
+numpy/distutils/tests/test_fcompiler_nagfor.pyc,,
+numpy/distutils/tests/test_from_template.py,sha256=SDYoe0XUpAayyEQDq7ZhrvEEz7U9upJDLYzhcdoVifc,1103
+numpy/distutils/tests/test_from_template.pyc,,
+numpy/distutils/tests/test_misc_util.py,sha256=8LIm12X83HmvgmpvJJ9inaU7FlGt287VwDM-rMKCOv4,3316
+numpy/distutils/tests/test_misc_util.pyc,,
+numpy/distutils/tests/test_npy_pkg_config.py,sha256=wa0QMQ9JAye87t2gDbFaBHp0HGpNFgwxJrJ30ZrHvNk,2639
+numpy/distutils/tests/test_npy_pkg_config.pyc,,
+numpy/distutils/tests/test_shell_utils.py,sha256=we9P8AvjCQky1NRDP3sXAJnNUek7rDmMR4Ar9cg9iSk,2030
+numpy/distutils/tests/test_shell_utils.pyc,,
+numpy/distutils/tests/test_system_info.py,sha256=Asv6c-N1I2JQHAcBZuObsoBlaaKOVepkhyFAmGp0zow,7730
+numpy/distutils/tests/test_system_info.pyc,,
+numpy/distutils/unixccompiler.py,sha256=M7Hn3ANMo8iP-sZtSAebI3RCLp0ViRYxawAbck0hlQM,5177
+numpy/distutils/unixccompiler.pyc,,
+numpy/doc/__init__.py,sha256=BDpxTM0iw2F4thjBkYqjIXX57F5KfIaH8xMd67N6Jh0,574
+numpy/doc/__init__.pyc,,
+numpy/doc/basics.py,sha256=5ygY_jESZpg9TENO0dszCttlq5v5IXvXjL2pWg5Atms,9658
+numpy/doc/basics.pyc,,
+numpy/doc/broadcasting.py,sha256=0uofJxPfkwsaQaTSju8TwiOpsmXSw2F3bzG8CdkKviU,5603
+numpy/doc/broadcasting.pyc,,
+numpy/doc/byteswapping.py,sha256=ivf9jUApDmMijOj1f5BGYkGCRVh4OLa_Wybbcl3A9Zw,5349
+numpy/doc/byteswapping.pyc,,
+numpy/doc/constants.py,sha256=G-xVDfqRId16dKXe1Owy6-tlexIzJUTuir2yu3iQgMc,9290
+numpy/doc/constants.pyc,,
+numpy/doc/creation.py,sha256=6FUALDWgqPWObcW-ZHDQMAnfo42I60rRR9pDpwb4-YE,5496
+numpy/doc/creation.pyc,,
+numpy/doc/glossary.py,sha256=D5Ljv1ZOOIj-O0Peg2E2QaUBV3mZb60bJOI_4gQmFTo,13583
+numpy/doc/glossary.pyc,,
+numpy/doc/indexing.py,sha256=qhhsiAeG_7Y2rgziwj515Fsw0wFL4dq1quT-ja3-0zs,15669
+numpy/doc/indexing.pyc,,
+numpy/doc/internals.py,sha256=xYp6lv4yyV0ZIo_qCvLCAWxDa0rhu7FNrTmpXY1isO4,9669
+numpy/doc/internals.pyc,,
+numpy/doc/misc.py,sha256=JWJqyiYL2qoSMVAb0QC8w_Pm5l7ZLxx2Z9D5ilgU4Uo,6191
+numpy/doc/misc.pyc,,
+numpy/doc/structured_arrays.py,sha256=Kr2n-4TQjfVVBoOLt3Lv30e6j5et9y8zYXZx3wg_hCc,26108
+numpy/doc/structured_arrays.pyc,,
+numpy/doc/subclassing.py,sha256=AqtEltybX__ghj91b73QgXcGpYd8gGlwoO-R7SQDwe8,28561
+numpy/doc/subclassing.pyc,,
+numpy/doc/ufuncs.py,sha256=vsAkCLEMh7Qa_3x4WbDMY3IQsDCLdOCuB_6P2aEcVLg,5427
+numpy/doc/ufuncs.pyc,,
+numpy/dual.py,sha256=SZ3DLWXQFv1lRKN1TlG487xmexpJFa7faaBdnnexm3E,1865
+numpy/dual.pyc,,
+numpy/f2py/__init__.py,sha256=nHuShe3wj5HQ2Xyb42DEorG3DK63HaXRNUizZgqB83g,3101
+numpy/f2py/__init__.pyc,,
+numpy/f2py/__main__.py,sha256=mnksAcMyLdK0So_DseQn0zalhnA7LflS7hHvo7QCVjU,134
+numpy/f2py/__main__.pyc,,
+numpy/f2py/__version__.py,sha256=rEHB9hlWmpryhNa0EmMnlAlDCGI4GXILC9CZUEV3Wew,254
+numpy/f2py/__version__.pyc,,
+numpy/f2py/auxfuncs.py,sha256=mDvaBo3Y8tYpXLZfq8DCv6UZ3-2JqWc_iNBZRxGesb0,21826
+numpy/f2py/auxfuncs.pyc,,
+numpy/f2py/capi_maps.py,sha256=FgizIHORFdaX5eIVZEQSlC9kVAidh0jfKoJYMK4Z86E,31416
+numpy/f2py/capi_maps.pyc,,
+numpy/f2py/cb_rules.py,sha256=un1xn8goj4jFL8FzxRwWSAzpr0CVcvwObVUKdIGJyaA,22946
+numpy/f2py/cb_rules.pyc,,
+numpy/f2py/cfuncs.py,sha256=NRxuXAaryWHOFh5205BvvDjajituolH6FvtsumCltvI,45114
+numpy/f2py/cfuncs.pyc,,
+numpy/f2py/common_rules.py,sha256=DOCOo4brpFaKNll8hOjG_vCYuOfKyTBYMItaDC_osEc,4981
+numpy/f2py/common_rules.pyc,,
+numpy/f2py/crackfortran.py,sha256=eiNE5IpgI8pYEocd2R3U7iays2lTid0x8ClkAsKB67Q,128927
+numpy/f2py/crackfortran.pyc,,
+numpy/f2py/diagnose.py,sha256=VNuNTGnQaXn9Fn2jlueYt47634CvLQSaAWJWy_Nxwnw,5295
+numpy/f2py/diagnose.pyc,,
+numpy/f2py/f2py2e.py,sha256=w9zSJG3tnCMyOrgZJqhZiEUoAnnI7oU61kYZzvsLdfo,23983
+numpy/f2py/f2py2e.pyc,,
+numpy/f2py/f2py_testing.py,sha256=8rkBjUsNhBavpoBgi_bqDS8H8tBdd5BR8hrE6ENsIAo,1523
+numpy/f2py/f2py_testing.pyc,,
+numpy/f2py/f90mod_rules.py,sha256=YFK4MPkGHBxshAInbcapnumX3qlu0h6ya6GQpS8zWLk,9850
+numpy/f2py/f90mod_rules.pyc,,
+numpy/f2py/func2subr.py,sha256=Oy12rqUa1vcXvzR6g8yx8jSYDwfKt5Jqiebf1QaWX1o,9224
+numpy/f2py/func2subr.pyc,,
+numpy/f2py/info.py,sha256=Mk1-neqpqYQ6njoVUCKHmMkyFkAqYeWH4cGZr8NfKiI,136
+numpy/f2py/info.pyc,,
+numpy/f2py/rules.py,sha256=WijCZZXIQSbV5wRHGGgjUvQlh4gQ9tKaqbSYTjvOyRk,58526
+numpy/f2py/rules.pyc,,
+numpy/f2py/setup.py,sha256=qNCIqRPcpEUhJBjihtEXEe4Iil4XDYVRAI_sZm7xZhM,2444
+numpy/f2py/setup.pyc,,
+numpy/f2py/src/fortranobject.c,sha256=VtPYFyh0jv5N432cFTZGonH7qwkUWJRdDUwZZ6RnqnU,35984
+numpy/f2py/src/fortranobject.h,sha256=ltMxueNeETQtEYSA_E7bpRtF8Jj1xuOBS-YNhjBMfOw,5227
+numpy/f2py/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+numpy/f2py/tests/__init__.pyc,,
+numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c,sha256=L_Z0GMyfYIoRiS0FIBlzy_nxlFoNbkLAatObVmKeGsk,9025
+numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap,sha256=But9r9m4iL7EGq_haMW8IiQ4VivH0TgUozxX4pPvdpE,29
+numpy/f2py/tests/src/assumed_shape/foo_free.f90,sha256=oBwbGSlbr9MkFyhVO2aldjc01dr9GHrMrSiRQek8U64,460
+numpy/f2py/tests/src/assumed_shape/foo_mod.f90,sha256=rfzw3QdI-eaDSl-hslCgGpd5tHftJOVhXvb21Y9Gf6M,499
+numpy/f2py/tests/src/assumed_shape/foo_use.f90,sha256=rmT9k4jP9Ru1PLcGqepw9Jc6P9XNXM0axY7o4hi9lUw,269
+numpy/f2py/tests/src/assumed_shape/precision.f90,sha256=r08JeTVmTTExA-hYZ6HzaxVwBn1GMbPAuuwBhBDtJUk,130
+numpy/f2py/tests/src/common/block.f,sha256=GQ0Pd-VMX3H3a-__f2SuosSdwNXHpBqoGnQDjf8aG9g,224
+numpy/f2py/tests/src/kind/foo.f90,sha256=zIHpw1KdkWbTzbXb73hPbCg4N2Htj3XL8DIwM7seXpo,347
+numpy/f2py/tests/src/mixed/foo.f,sha256=90zmbSHloY1XQYcPb8B5d9bv9mCZx8Z8AMTtgDwJDz8,85
+numpy/f2py/tests/src/mixed/foo_fixed.f90,sha256=pxKuPzxF3Kn5khyFq9ayCsQiolxB3SaNtcWaK5j6Rv4,179
+numpy/f2py/tests/src/mixed/foo_free.f90,sha256=fIQ71wrBc00JUAVUj_r3QF9SdeNniBiMw6Ly7CGgPWU,139
+numpy/f2py/tests/src/parameter/constant_both.f90,sha256=-bBf2eqHb-uFxgo6Q7iAtVUUQzrGFqzhHDNaxwSICfQ,1939
+numpy/f2py/tests/src/parameter/constant_compound.f90,sha256=re7pfzcuaquiOia53UT7qNNrTYu2euGKOF4IhoLmT6g,469
+numpy/f2py/tests/src/parameter/constant_integer.f90,sha256=nEmMLitKoSAG7gBBEQLWumogN-KS3DBZOAZJWcSDnFw,612
+numpy/f2py/tests/src/parameter/constant_non_compound.f90,sha256=IcxESVLKJUZ1k9uYKoSb8Hfm9-O_4rVnlkiUU2diy8Q,609
+numpy/f2py/tests/src/parameter/constant_real.f90,sha256=quNbDsM1Ts2rN4WtPO67S9Xi_8l2cXabWRO00CPQSSQ,610
+numpy/f2py/tests/src/regression/inout.f90,sha256=CpHpgMrf0bqA1W3Ozo3vInDz0RP904S7LkpdAH6ODck,277
+numpy/f2py/tests/src/size/foo.f90,sha256=IlFAQazwBRr3zyT7v36-tV0-fXtB1d7WFp6S1JVMstg,815
+numpy/f2py/tests/src/string/char.f90,sha256=ihr_BH9lY7eXcQpHHDQhFoKcbu7VMOX5QP2Tlr7xlaM,618
+numpy/f2py/tests/test_array_from_pyobj.py,sha256=gLSX9JuF_8NNboUQRzRF3IYC7pWJ06Mw8m6sy2wQvCQ,22083
+numpy/f2py/tests/test_array_from_pyobj.pyc,,
+numpy/f2py/tests/test_assumed_shape.py,sha256=QhSsSJ4gzrgACSO-dyasMPhJSfa7PzDAxAd9yN0M6zI,949
+numpy/f2py/tests/test_assumed_shape.pyc,,
+numpy/f2py/tests/test_block_docstring.py,sha256=lbRnFEGeseQ-WP9grC8Y4J6xKu-Nwgp0_fSVuYn19Hw,568
+numpy/f2py/tests/test_block_docstring.pyc,,
+numpy/f2py/tests/test_callback.py,sha256=d5366rZPJMowFLd7AiwUMRdhKTe8BMeakcp1plQNBJI,3994
+numpy/f2py/tests/test_callback.pyc,,
+numpy/f2py/tests/test_common.py,sha256=tLmi1JrfwFdTcBlUInxTn04f6Hf8eSB00sWRoKJvHrM,868
+numpy/f2py/tests/test_common.pyc,,
+numpy/f2py/tests/test_compile_function.py,sha256=gQG9PeMaXCcjlc233cEJb5eMoNFfvzAdkHf8qb7Css0,4256
+numpy/f2py/tests/test_compile_function.pyc,,
+numpy/f2py/tests/test_kind.py,sha256=G6u6EWjVHenmPju3RQCa9bSeCJGDul3VyXFgp2_Yc7w,1078
+numpy/f2py/tests/test_kind.pyc,,
+numpy/f2py/tests/test_mixed.py,sha256=nUgGcvcbyd_NB6NuwFCIH8ze5eeMztC-fE5lCHXc9Bg,931
+numpy/f2py/tests/test_mixed.pyc,,
+numpy/f2py/tests/test_parameter.py,sha256=_wX-gM-XGxA_mfDBM8np9NLjYiCF6LJbglwKf09JbdM,3976
+numpy/f2py/tests/test_parameter.pyc,,
+numpy/f2py/tests/test_quoted_character.py,sha256=Q0oDtl3STQqzSap5VYPpfzJJ72NtQchm6Vg-bwuoBl4,1029
+numpy/f2py/tests/test_quoted_character.pyc,,
+numpy/f2py/tests/test_regression.py,sha256=lPQUKx5RrVtGhyIvIcWS5GgA_CgQypabuuna-Q1z3hs,764
+numpy/f2py/tests/test_regression.pyc,,
+numpy/f2py/tests/test_return_character.py,sha256=4a_JeEtY1AkT-Q-01iaZyqWLDGmZGW17d88JNFZoXTc,3864
+numpy/f2py/tests/test_return_character.pyc,,
+numpy/f2py/tests/test_return_complex.py,sha256=FO4oflCncNIft36R3Fe9uiyDtryiB-_d2PLMH3x64I4,4779
+numpy/f2py/tests/test_return_complex.pyc,,
+numpy/f2py/tests/test_return_integer.py,sha256=cyyAbyHUepwYeyXlgIa2FD4B7A2dHnpp2jwx8ZDQiZQ,4749
+numpy/f2py/tests/test_return_integer.pyc,,
+numpy/f2py/tests/test_return_logical.py,sha256=u3dazkOU1oz9kZKYXBd2GWaEr02MYfjGdLrb7kT8MiY,4974
+numpy/f2py/tests/test_return_logical.pyc,,
+numpy/f2py/tests/test_return_real.py,sha256=QVRKzeO44ZuIlV8EycmtXaHT_i0rnX2bi3rOh7py4GM,5619
+numpy/f2py/tests/test_return_real.pyc,,
+numpy/f2py/tests/test_semicolon_split.py,sha256=v7YFx-oTbXUZZ4qjdblCYeVVtkD1YYa4CbuEf2LTOLs,1580
+numpy/f2py/tests/test_semicolon_split.pyc,,
+numpy/f2py/tests/test_size.py,sha256=GV7S4tl8FhK60T_EpX86yVQo_bMVTdyOTB8fGVIQ24o,1352
+numpy/f2py/tests/test_size.pyc,,
+numpy/f2py/tests/test_string.py,sha256=LTQC9AFVsUAuJVFuH3Wltl-NfFIilVl0KvBNnEgdnmo,676
+numpy/f2py/tests/test_string.pyc,,
+numpy/f2py/tests/util.py,sha256=u06FJvpEGZM6P9WaZWkfTxR5TSdjCm7eXku45MO5R_o,9436
+numpy/f2py/tests/util.pyc,,
+numpy/f2py/use_rules.py,sha256=L6nTSJnxougQ2PVAzR7s-1spidcfDp9tzLIFAJe3gUI,3652
+numpy/f2py/use_rules.pyc,,
+numpy/fft/__init__.py,sha256=KGWBTdw_6ckUIfniIdikkgBwDy8riaGID8x4cdOf_Ds,252
+numpy/fft/__init__.pyc,,
+numpy/fft/fftpack.py,sha256=lsjJM82Zdhh9t68dQrzO6iPihdgE_QK3GtSSfpYEcxI,47089
+numpy/fft/fftpack.pyc,,
+numpy/fft/fftpack_lite.so,sha256=SHB5OV2xfkGP6xHoRsRM6r2F39PpCrYdhlkhrZ-YtQE,149826
+numpy/fft/helper.py,sha256=6Q_SGRP2hukwWRJjiL5OidSfdJIkvZo7AePSqYMLgJI,9710
+numpy/fft/helper.pyc,,
+numpy/fft/info.py,sha256=831NwiCI33uiLx21G7kFCwzZuFxDfmU8n-2LG4FJm2w,7235
+numpy/fft/info.pyc,,
+numpy/fft/setup.py,sha256=VR1boee7xZd3lOQVRJ3083I0kYqq_-RCo6CK6UK8Lso,550
+numpy/fft/setup.pyc,,
+numpy/fft/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+numpy/fft/tests/__init__.pyc,,
+numpy/fft/tests/test_fftpack.py,sha256=Ub9oaoyEILrev0kZoEh0hTaYlf-orOUtfKIsNbANwoU,7019
+numpy/fft/tests/test_fftpack.pyc,,
+numpy/fft/tests/test_helper.py,sha256=q8y7x0IaXRW2ai0dCEmhW48MRkQ6k8bSegti5gCE6ac,9774
+numpy/fft/tests/test_helper.pyc,,
+numpy/lib/__init__.py,sha256=NgwUuJaACbQF-qd9VLy6CEPwcZHk1GEdWPW0UhZPQl8,1358
+numpy/lib/__init__.pyc,,
+numpy/lib/_datasource.py,sha256=qKNDjPS0q8CJ4HD9I6IPgJEmr2MN2m-yinyYOJBqZ1w,25523
+numpy/lib/_datasource.pyc,,
+numpy/lib/_iotools.py,sha256=JERUFZ1Xja8mq9T7AQdtO2RwoblLmmDcfhnL9iT0xOE,32683
+numpy/lib/_iotools.pyc,,
+numpy/lib/_version.py,sha256=8ouI5DbgX1RuNbPhVX_Fn14_v7ZiwwQ1grQPX3_bXBs,4866
+numpy/lib/_version.pyc,,
+numpy/lib/arraypad.py,sha256=dAhAzdE0AXL26VMyOj0XIzh_XqcjWv6SDSDaAGSFxyQ,45097
+numpy/lib/arraypad.pyc,,
+numpy/lib/arraysetops.py,sha256=gpQmyj62NhyGms41X4J0L2qF5apzPqSCRsSN1Os8AVo,24175
+numpy/lib/arraysetops.pyc,,
+numpy/lib/arrayterator.py,sha256=niYNI2qhySUT5j_3gl07pPbkmY4GJqdwIGaMZyPil84,7191
+numpy/lib/arrayterator.pyc,,
+numpy/lib/financial.py,sha256=fYFVxvZmJJUxyWarmOpNz8rhFfs_Gv3vx8iB9L2LVwc,25985
+numpy/lib/financial.pyc,,
+numpy/lib/format.py,sha256=-LoQbL9aFfVv38VLi99Pga5zY1tO4qdsog7VGc2N4wE,30723
+numpy/lib/format.pyc,,
+numpy/lib/function_base.py,sha256=oCjNZGSGegRLTaEMmOJwYoTTWR1eZuVEO9HJLCk26Gc,156000
+numpy/lib/function_base.pyc,,
+numpy/lib/histograms.py,sha256=0FviiX5FbM5sxHjbHHE54LQLday_AoX4LCF_N8_x4bk,39375
+numpy/lib/histograms.pyc,,
+numpy/lib/index_tricks.py,sha256=OymHt6Mzi16XZBWSvLdnwGr9ywaPker-3Dq2vQHXN7I,29087
+numpy/lib/index_tricks.pyc,,
+numpy/lib/info.py,sha256=oVczF_pC_CMZC2h2adb2HHza_1qF3qI065j4RBrd-I4,6616
+numpy/lib/info.pyc,,
+numpy/lib/mixins.py,sha256=GeOiq01E663Z_06xQfIUYKpl2JPkswqhaQEernjnO_Q,7268
+numpy/lib/mixins.pyc,,
+numpy/lib/nanfunctions.py,sha256=7LsFmh0meOwBJpzoNnR1V8e2nAJv61A8Ib9EWmYFRLg,57741
+numpy/lib/nanfunctions.pyc,,
+numpy/lib/npyio.py,sha256=PqeSplzcbS7aZQdlYTxYAFPfJkPwJuS1I1f_sfEEpO0,84661
+numpy/lib/npyio.pyc,,
+numpy/lib/polynomial.py,sha256=Gm-XIvalJS2B1KMRvyBddziWHtd8ZCmWfxZlJ0kOiyc,40292
+numpy/lib/polynomial.pyc,,
+numpy/lib/recfunctions.py,sha256=OQ3lGqoHCauSXqgGJaGH0Bl1eNV_SdoAz02P0_7L2lw,54851
+numpy/lib/recfunctions.pyc,,
+numpy/lib/scimath.py,sha256=axf_K8DphkbuKZXA6K2A5fbyIu1BdKQ6P74iFW9YpTc,14698
+numpy/lib/scimath.pyc,,
+numpy/lib/setup.py,sha256=os9eV9wSzwTQlfxeoQ33gYQ4wOj1_6EvqcROc8PyGbE,379
+numpy/lib/setup.pyc,,
+numpy/lib/shape_base.py,sha256=T1RaVDs9X7GUBclWBf2SZkLZhY3xbpctMv8rtt0gdM0,37967
+numpy/lib/shape_base.pyc,,
+numpy/lib/stride_tricks.py,sha256=P7koCHdGLg31K2aQPIPcAmqLKKsnY-HZw_eS3hqUpZA,9123
+numpy/lib/stride_tricks.pyc,,
+numpy/lib/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+numpy/lib/tests/__init__.pyc,,
+numpy/lib/tests/data/py2-objarr.npy,sha256=F4cyUC-_TB9QSFLAo2c7c44rC6NUYIgrfGx9PqWPSKk,258
+numpy/lib/tests/data/py2-objarr.npz,sha256=xo13HBT0FbFZ2qvZz0LWGDb3SuQASSaXh7rKfVcJjx4,366
+numpy/lib/tests/data/py3-objarr.npy,sha256=pTTVh8ezp-lwAK3fkgvdKU8Arp5NMKznVD-M6Ex_uA0,341
+numpy/lib/tests/data/py3-objarr.npz,sha256=qQR0gS57e9ta16d_vCQjaaKM74gPdlwCPkp55P-qrdw,449
+numpy/lib/tests/data/python3.npy,sha256=X0ad3hAaLGXig9LtSHAo-BgOvLlFfPYMnZuVIxRmj-0,96
+numpy/lib/tests/data/win64python2.npy,sha256=agOcgHVYFJrV-nrRJDbGnUnF4ZTPYXuSeF-Mtg7GMpc,96
+numpy/lib/tests/test__datasource.py,sha256=5LwfmvIysaLHlCYkmsj46S7YRF2zRG4BmKSjjJr6fdE,11463
+numpy/lib/tests/test__datasource.pyc,,
+numpy/lib/tests/test__iotools.py,sha256=7ocNK0I-XKtiJLxnq2Fq_Yszi-e-70Km7crI28Jyqww,13714
+numpy/lib/tests/test__iotools.pyc,,
+numpy/lib/tests/test__version.py,sha256=eCeeSqb8G3WNtCgkM3XGz9Zszyye-KFDlNQ7EY2J_UY,2055
+numpy/lib/tests/test__version.pyc,,
+numpy/lib/tests/test_arraypad.py,sha256=ZIcLFH5ykizK2XT6WN1f9CyR48tqHQnx_b2Ojurs7eo,49320
+numpy/lib/tests/test_arraypad.pyc,,
+numpy/lib/tests/test_arraysetops.py,sha256=WsH_vJI5guzG9Mix0y-kwVceTZ8e-aKJxEaf_RNTcbE,22157
+numpy/lib/tests/test_arraysetops.pyc,,
+numpy/lib/tests/test_arrayterator.py,sha256=run7iWWbvoHGGsDv_uB6G8QENFzOCSgUIxAMVp7ZMu4,1357
+numpy/lib/tests/test_arrayterator.pyc,,
+numpy/lib/tests/test_financial.py,sha256=8cCdlpNixwl1Wrgblemxi3ndTxX_Sq2yr-30lSURnq0,17098
+numpy/lib/tests/test_financial.pyc,,
+numpy/lib/tests/test_format.py,sha256=m8X4eNPyby2wOIENzkW8apHugUi5WRkzHZO15h9MSDU,35376
+numpy/lib/tests/test_format.pyc,,
+numpy/lib/tests/test_function_base.py,sha256=3qeRuK68MKs7Xkxe61M2ozQkCiSnTDpGFeJbrp807Uc,117817
+numpy/lib/tests/test_function_base.pyc,,
+numpy/lib/tests/test_histograms.py,sha256=0EhLLKOADkyQZTucCcXfYyAC5X1n5_vglMid0VJO-DE,33415
+numpy/lib/tests/test_histograms.pyc,,
+numpy/lib/tests/test_index_tricks.py,sha256=Ql-AUXqIgQSdLjbeRI7GmaiVVtdgH0YoR0m5DGS_V-g,16322
+numpy/lib/tests/test_index_tricks.pyc,,
+numpy/lib/tests/test_io.py,sha256=Z2bkX6n7a7lIKS4hfWxf20XyCFQfjFK8PAjEocnpOI0,96988
+numpy/lib/tests/test_io.pyc,,
+numpy/lib/tests/test_mixins.py,sha256=YNIKF716Jz7V8FJ8Zzww_F6laTD8j3A6SBxCXqt6rAQ,7233
+numpy/lib/tests/test_mixins.pyc,,
+numpy/lib/tests/test_nanfunctions.py,sha256=wS-i0JsIwMb0p35vW2qGC_dpewDFVzI2YTnJ25pr1O8,36179
+numpy/lib/tests/test_nanfunctions.pyc,,
+numpy/lib/tests/test_packbits.py,sha256=W4gtoYBa5LbevvbXL9lvrZRT5Wt1fqyI3J7oDLtza_A,12851
+numpy/lib/tests/test_packbits.pyc,,
+numpy/lib/tests/test_polynomial.py,sha256=C7XhyAVCbh_5_HWiZ5XPEVOq_WGtBa8hvmcmVYE3QDc,10055
+numpy/lib/tests/test_polynomial.pyc,,
+numpy/lib/tests/test_recfunctions.py,sha256=3ExnP2zwR3y9mO3rZ_ueqIQ5aMitvje76U-xWUG_PjY,38471
+numpy/lib/tests/test_recfunctions.pyc,,
+numpy/lib/tests/test_regression.py,sha256=96pKecYGHPZwAoHV3_kLvl3gIb0PN0m33R0H3dd7uSk,8472
+numpy/lib/tests/test_regression.pyc,,
+numpy/lib/tests/test_shape_base.py,sha256=nVUzbHADBmrOaOHeh5fA27gjxsnOC3r-S5lyo1n5MV8,23979
+numpy/lib/tests/test_shape_base.pyc,,
+numpy/lib/tests/test_stride_tricks.py,sha256=HUp9YL7eBTRfT8gs6iraMl6M3YvoDxfFmkkwwmroing,15392
+numpy/lib/tests/test_stride_tricks.pyc,,
+numpy/lib/tests/test_twodim_base.py,sha256=toC7eTjEuZxygJwQub0tC7_uGCtLVVMCHfa6EUkGJU4,17524
+numpy/lib/tests/test_twodim_base.pyc,,
+numpy/lib/tests/test_type_check.py,sha256=KxnoWjY3iGTnr0pDWEah73ZAx_6a85S9SSnkKIG-sn0,13509
+numpy/lib/tests/test_type_check.pyc,,
+numpy/lib/tests/test_ufunclike.py,sha256=VFt_8BDH7q80yXmYJSn1crolIMizKFN3mAJcigaazLU,3350
+numpy/lib/tests/test_ufunclike.pyc,,
+numpy/lib/tests/test_utils.py,sha256=kIH7i6N_Gtsk8FgIHTuYvASeGxQB15UYPJwqvWBPWkY,2474
+numpy/lib/tests/test_utils.pyc,,
+numpy/lib/twodim_base.py,sha256=sFRiYvhrOG9EY_YMlTblXb4aJKZaE3WLUx1WBKoIgG4,27339
+numpy/lib/twodim_base.pyc,,
+numpy/lib/type_check.py,sha256=XsSXtj8bHk22iq4NNZpwqMKPM9FoddtYNWKFi63oZqc,18073
+numpy/lib/type_check.pyc,,
+numpy/lib/ufunclike.py,sha256=1df-LT8UlC_SRmc06DhAnsUZLHROx0p56jw6GUwcap8,7156
+numpy/lib/ufunclike.pyc,,
+numpy/lib/user_array.py,sha256=7nJPlDfP-04Lcq8iH_cqBbSEsx5cHCcj-2Py-oh-5t0,7817
+numpy/lib/user_array.pyc,,
+numpy/lib/utils.py,sha256=L_JtNhuRwM6_4YyhM5jCpeH36j_lj5-t6XJT3pB4In0,36161
+numpy/lib/utils.pyc,,
+numpy/linalg/__init__.py,sha256=P2q5fyWhZEc-xhcruFEcHWmYhSBOWSr63i9UjE8x3fk,2326
+numpy/linalg/__init__.pyc,,
+numpy/linalg/_umath_linalg.so,sha256=o-tN4TdHWHggXAxfA1nBDiHBJqqyuW6o09zBFoFwzAw,872112
+numpy/linalg/info.py,sha256=AbXPYYabJK5In0F9IMk-oVWZgDyEaoU45Wnq6RtuCJs,1198
+numpy/linalg/info.pyc,,
+numpy/linalg/lapack_lite.so,sha256=BNhR9YGqEaCh68JU5TlJnmxJGqZdgZ7_ZCS_lQwRay4,108832
+numpy/linalg/linalg.py,sha256=rZuKNk2u7c65Cp8yssB1BEeCSPauDClcgYF6N4dY2So,85094
+numpy/linalg/linalg.pyc,,
+numpy/linalg/setup.py,sha256=k1X4EfRWACFtJYfb8Wiol_-pPnEMtqURxQ8H9FwFHWg,1878
+numpy/linalg/setup.pyc,,
+numpy/linalg/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+numpy/linalg/tests/__init__.pyc,,
+numpy/linalg/tests/test_build.py,sha256=xKcJ8JmGk-zTqxxMhDX5GFsw-ptn8uwOUOcxaTUuPHc,1704
+numpy/linalg/tests/test_build.pyc,,
+numpy/linalg/tests/test_deprecations.py,sha256=eGYDVF3rmGQyDEMGOc-p_zc84Cx1I3jQPyaJe7xOvEc,706
+numpy/linalg/tests/test_deprecations.pyc,,
+numpy/linalg/tests/test_linalg.py,sha256=y87Nka03PZAqUl5GqzOb4LsLbgbGib5V9ehdoTIPLRI,69977
+numpy/linalg/tests/test_linalg.pyc,,
+numpy/linalg/tests/test_regression.py,sha256=zz7lprqDg7yU-z1d6AOdCDH3Tjqgw82QGiaPM7peixY,5671
+numpy/linalg/tests/test_regression.pyc,,
+numpy/ma/__init__.py,sha256=fcmMCElT3MmCkjIGVhXyEAbjuWe_j1NVUiE65eAMvy0,1470
+numpy/ma/__init__.pyc,,
+numpy/ma/bench.py,sha256=q3y_e1wpHVEdg0iIxrBshWVt2LOFfYi6q-eIJ3RSVrU,4942
+numpy/ma/bench.pyc,,
+numpy/ma/core.py,sha256=UVL30fxCdddsnMlLOgcCxl-ca_-Iqm6uyN8QHnczcH4,256431
+numpy/ma/core.pyc,,
+numpy/ma/extras.py,sha256=iNaY5jpgYLssKzzgS7FfhlZ3BM59gjLbsNb7CUylLDU,56986
+numpy/ma/extras.pyc,,
+numpy/ma/mrecords.py,sha256=j8EituvbyOFG5oiTwHBnVdQX1mhD_qByBezBeB_R1hM,26937
+numpy/ma/mrecords.pyc,,
+numpy/ma/setup.py,sha256=zkieH8BeiGVXl3Wlt_WeP9kciZlyAZY20DDu4SGk4b4,429
+numpy/ma/setup.pyc,,
+numpy/ma/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+numpy/ma/tests/__init__.pyc,,
+numpy/ma/tests/test_core.py,sha256=pM6Wn3jxw6cPV7wFlNKWyyAYFDAGAm0oYtw2txBudKw,196219
+numpy/ma/tests/test_core.pyc,,
+numpy/ma/tests/test_deprecations.py,sha256=StN-maPV6dwIPn1LmJ_Fd9l_Ysrbzvl8BZy6zYeUru8,2340
+numpy/ma/tests/test_deprecations.pyc,,
+numpy/ma/tests/test_extras.py,sha256=f7wFcowBM60pnNwIJ221W9X6AWNS9pRK_VjVXV5Wqio,66791
+numpy/ma/tests/test_extras.pyc,,
+numpy/ma/tests/test_mrecords.py,sha256=SX0_-SgRqIQBlPaiDNVD0-oBAot0N9aLPWM7Gj4U804,19966
+numpy/ma/tests/test_mrecords.pyc,,
+numpy/ma/tests/test_old_ma.py,sha256=85NJMKj-TG8WGSMFT2KbDEsBsrgV58URkFUd1v9iuBc,32351
+numpy/ma/tests/test_old_ma.pyc,,
+numpy/ma/tests/test_regression.py,sha256=AGAA97e9_0q1VHSTOx6qIsh1qA56GzEKhWZWveuHf3w,2993
+numpy/ma/tests/test_regression.pyc,,
+numpy/ma/tests/test_subclassing.py,sha256=GEqxbqfodv1823cRitfJK3qMWTbDWIpv2HwuVB_kpXk,12997
+numpy/ma/tests/test_subclassing.pyc,,
+numpy/ma/testutils.py,sha256=meyy8_0sx4g2sebsVO1PrFSc6ogLzEU7vjOuu2VjY1U,10365
+numpy/ma/testutils.pyc,,
+numpy/ma/timer_comparison.py,sha256=Q1AyfHzNrWzVTrx6ebL9HgpQEkEJPHAkbWuTK_0bBkQ,15586
+numpy/ma/timer_comparison.pyc,,
+numpy/ma/version.py,sha256=KpJAmUE1s1TpbgqgdBpDoslxm7kOMpczLjEzLMGv9Ag,380
+numpy/ma/version.pyc,,
+numpy/matlib.py,sha256=bfk5RflWhOjnBKhpU4L-WDafyzoNIy5-K-8MMyIauN8,9809
+numpy/matlib.pyc,,
+numpy/matrixlib/__init__.py,sha256=W-2bi7zuMWQY5U1ikwfaBPubrcYkbxzPzzIeYz3RYPA,284
+numpy/matrixlib/__init__.pyc,,
+numpy/matrixlib/defmatrix.py,sha256=1tR1FsgapRQ2XbiAvV4ik4mwkKLJA1y6ABeztSeYr2k,30660
+numpy/matrixlib/defmatrix.pyc,,
+numpy/matrixlib/setup.py,sha256=7DS-rWnyWlLTuOj31UuhkyW8QhLQ7KD5wirtWT_DUhc,437
+numpy/matrixlib/setup.pyc,,
+numpy/matrixlib/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+numpy/matrixlib/tests/__init__.pyc,,
+numpy/matrixlib/tests/test_defmatrix.py,sha256=FRkFPpDpgUEzEAgShORCVhPOuqclxBftHyEW5z2oV4o,15315
+numpy/matrixlib/tests/test_defmatrix.pyc,,
+numpy/matrixlib/tests/test_interaction.py,sha256=y0ldcMIKCeT_tRo_uON6Cvxuff-M4MxmqnzA0kDFHYU,12179
+numpy/matrixlib/tests/test_interaction.pyc,,
+numpy/matrixlib/tests/test_masked_matrix.py,sha256=4uslUEOdw_ACZ9R-VKX2m82HlDeNFrBs-zP8uuWj5gI,8962
+numpy/matrixlib/tests/test_masked_matrix.pyc,,
+numpy/matrixlib/tests/test_matrix_linalg.py,sha256=XYsAcC02YgvlfqAQOLY2hOuggeRlRhkztNsLYWGb4QQ,2125
+numpy/matrixlib/tests/test_matrix_linalg.pyc,,
+numpy/matrixlib/tests/test_multiarray.py,sha256=jM-cFU_ktanoyJ0ScRYv5xwohhE3pKpVhBBtd31b-IQ,628
+numpy/matrixlib/tests/test_multiarray.pyc,,
+numpy/matrixlib/tests/test_numeric.py,sha256=YPq5f11MUAV6WcLQbl8xKWcm17lMj9SJ09mamqGCpxA,515
+numpy/matrixlib/tests/test_numeric.pyc,,
+numpy/matrixlib/tests/test_regression.py,sha256=ou1TP5bFNpjRaL2-zQxzS11ChwvAkCVp3k71SBtOO9M,1001
+numpy/matrixlib/tests/test_regression.pyc,,
+numpy/polynomial/__init__.py,sha256=boBgsbz2Rr49pBTyGNT3TnLRTPSauyjBNeCVGek7oUM,1134
+numpy/polynomial/__init__.pyc,,
+numpy/polynomial/_polybase.py,sha256=GEr4yD6OnPtNo98Mf0p5r8vt_nEmTeqTRG3pljAADbQ,32779
+numpy/polynomial/_polybase.pyc,,
+numpy/polynomial/chebyshev.py,sha256=79hcfdqUHgAtBofN4ZZHTkOJNzYeJ12KsCgZm4zng6I,67424
+numpy/polynomial/chebyshev.pyc,,
+numpy/polynomial/hermite.py,sha256=14FITYGMDQzZLsAIu6TwIeS_Eh4O3dTizXHRmVLddAI,58113
+numpy/polynomial/hermite.pyc,,
+numpy/polynomial/hermite_e.py,sha256=NuXm8lrR5s8yli_bUauvgUsX0p0yM-EwtvNj6srObtU,58237
+numpy/polynomial/hermite_e.pyc,,
+numpy/polynomial/laguerre.py,sha256=o3ZvgUi9ivpWGqPIk6hCAGcs8P9mljAxP54oXpQfYiM,56513
+numpy/polynomial/laguerre.pyc,,
+numpy/polynomial/legendre.py,sha256=UvhOne6SyvBKj5DIYGyfD9bu_RcMqADY5ezbVkhBR8s,57701
+numpy/polynomial/legendre.pyc,,
+numpy/polynomial/polynomial.py,sha256=9XK_JxmTv4IjCgtJ03dd6ASNDIe8H0crEOa7MbstUTI,53443
+numpy/polynomial/polynomial.pyc,,
+numpy/polynomial/polyutils.py,sha256=2qA03OFnMiXQj6aF0vIUWN2B99D4yd-dEMSMOYDOLf0,11529
+numpy/polynomial/polyutils.pyc,,
+numpy/polynomial/setup.py,sha256=PKIUV6Jh7_0jBboPp3IHPmp6LWVs4tbIkdu_FtmI_5U,385
+numpy/polynomial/setup.pyc,,
+numpy/polynomial/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+numpy/polynomial/tests/__init__.pyc,,
+numpy/polynomial/tests/test_chebyshev.py,sha256=ntXcwISDcgmlGGfMhwYWiWdjqtTVVUfFdQHm-Msx5yc,20719
+numpy/polynomial/tests/test_chebyshev.pyc,,
+numpy/polynomial/tests/test_classes.py,sha256=F07n2iV0_XPK2JC_8egdnO6FDETpttR7gW6e-8zDtEs,20056
+numpy/polynomial/tests/test_classes.pyc,,
+numpy/polynomial/tests/test_hermite.py,sha256=OQNcZGOg1FoqVVXrouHqanG2d2zCUEUdhutRV9WaIOs,18758
+numpy/polynomial/tests/test_hermite.pyc,,
+numpy/polynomial/tests/test_hermite_e.py,sha256=W1akH58gTi9VrXh2GiyJfPhpeJzf57XnAFVIgM33hp4,19092
+numpy/polynomial/tests/test_hermite_e.pyc,,
+numpy/polynomial/tests/test_laguerre.py,sha256=fyNsRBmdHn1Sd0uQaoXeL70kJb6nQCMeYE5X7utxkS0,17692
+numpy/polynomial/tests/test_laguerre.pyc,,
+numpy/polynomial/tests/test_legendre.py,sha256=Np3xtAMN5zlioM5HFsyLR4tdAJW0ibvfJbz9QHWMkOo,18456
+numpy/polynomial/tests/test_legendre.pyc,,
+numpy/polynomial/tests/test_polynomial.py,sha256=xbrdofA-XCwZhsyPL5tMKDEZ5cWzxNp6Pz4SV_4nKEU,19552
+numpy/polynomial/tests/test_polynomial.pyc,,
+numpy/polynomial/tests/test_polyutils.py,sha256=GzRz3leypd2UrWE-EwuIWL0lbbj6ks6Mjli3tozDN9U,3081
+numpy/polynomial/tests/test_polyutils.pyc,,
+numpy/polynomial/tests/test_printing.py,sha256=_7O-05q3JEjdxmuzBdWxligQVdC6qGygKmbhfiYW9KQ,2067
+numpy/polynomial/tests/test_printing.pyc,,
+numpy/random/__init__.py,sha256=RvKHC6GpPCEcZdGLYiDG3RH5vEbJfOkL4fM0igfBAAA,6053
+numpy/random/__init__.pyc,,
+numpy/random/info.py,sha256=OzPLVv_aA7kxLu9WdGiRqO2_yA2163PWQi3Lwwrhs3E,109
+numpy/random/info.pyc,,
+numpy/random/mtrand.so,sha256=m1nTioYCsspPoM5cqrMt5FkkqiGErpwL3ulG-HyKuYY,3190922
+numpy/random/randomkit.h,sha256=GOfc27td8dO8YM0WeB_qM313pouCDUt9Ad7nc_lgKI0,6799
+numpy/random/setup.py,sha256=Zm-rZze8r6GWKT-o9tYq2DVym0AMh2tNwE_s6m1Z-Bc,2286
+numpy/random/setup.pyc,,
+numpy/random/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+numpy/random/tests/__init__.pyc,,
+numpy/random/tests/test_random.py,sha256=0V5bRWCOZiMrbv96Lhq-nR0R-o8yedZ8Jgy1ewfP_Gc,66167
+numpy/random/tests/test_random.pyc,,
+numpy/random/tests/test_regression.py,sha256=Eb0wEE5cbGklIwcSTNkPI6CpawspICJsx3lYtTDxl7A,5671
+numpy/random/tests/test_regression.pyc,,
+numpy/setup.py,sha256=lsyhnRXfo0ybq63nVUX8HnYhQ1mI0bSic-mk-lK3wnc,920
+numpy/setup.pyc,,
+numpy/testing/__init__.py,sha256=MHRK5eimwrC9RE723HlOcOQGxu5HAmQ-qwlcVX1sZ1k,632
+numpy/testing/__init__.pyc,,
+numpy/testing/_private/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+numpy/testing/_private/__init__.pyc,,
+numpy/testing/_private/decorators.py,sha256=JSIBsQH4t1rdMcr1-Cf2jBJ6CXzIGEFyZoWxUJuXI7M,9015
+numpy/testing/_private/decorators.pyc,,
+numpy/testing/_private/noseclasses.py,sha256=nYtV16KcoqAcHswfYO-u6bRIrDBvCvpqjCNfl7zk-SA,14601
+numpy/testing/_private/noseclasses.pyc,,
+numpy/testing/_private/nosetester.py,sha256=S1nEtDBvNT87Zrt8XmuSVIBWpanJwjtD1YiRlcf7eoA,20515
+numpy/testing/_private/nosetester.pyc,,
+numpy/testing/_private/parameterized.py,sha256=S_cqBegd7kdwVq1kg_DAnywwFPT_g1bjDJ6-LMq0LO4,18316
+numpy/testing/_private/parameterized.pyc,,
+numpy/testing/_private/utils.py,sha256=TNDvuuT_SN73Hr07ODXOvRk8wgueYWF1uYdSSjIJhsk,78209
+numpy/testing/_private/utils.pyc,,
+numpy/testing/decorators.py,sha256=BEktn0PuVlmgUQ_zGVNXu0wQYh3W0_bu61LnQPrxY20,428
+numpy/testing/decorators.pyc,,
+numpy/testing/noseclasses.py,sha256=iZmGKPHAGQIshsEONB-oLt7gHPzx2Bg57oat_M4M5XE,423
+numpy/testing/noseclasses.pyc,,
+numpy/testing/nosetester.py,sha256=as3E0khSkTseCRpyvtOSSq4fJY1K1lrrAyIcXOErTMo,583
+numpy/testing/nosetester.pyc,,
+numpy/testing/print_coercion_tables.py,sha256=F44AObcou_xytUWszku8t1bWuui-4I_18o7Z7zW8l18,2705
+numpy/testing/print_coercion_tables.pyc,,
+numpy/testing/setup.py,sha256=9PnlgcejccUBzaGPi9Po-ElhmuQMAmWCBRdvCDwiKYw,676
+numpy/testing/setup.pyc,,
+numpy/testing/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+numpy/testing/tests/__init__.pyc,,
+numpy/testing/tests/test_decorators.py,sha256=JB3wOfG4SHAvGNBJgEnotP6Y7uHgNq00-Ji8-vpTL0M,5921
+numpy/testing/tests/test_decorators.pyc,,
+numpy/testing/tests/test_doctesting.py,sha256=sKBXwuRZwMFSiem3R9egBzzSUB81kkpw9y-Y07iqU2M,1413
+numpy/testing/tests/test_doctesting.pyc,,
+numpy/testing/tests/test_utils.py,sha256=I8zSV7NrpCwC4hF9DXqR22OqMUF65Rg6oljOXBWp010,52756
+numpy/testing/tests/test_utils.pyc,,
+numpy/testing/utils.py,sha256=3Z2wHEc2f-0lZrdDueAdbe96KQw1DqM_aFosea9VRtY,1232
+numpy/testing/utils.pyc,,
+numpy/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+numpy/tests/__init__.pyc,,
+numpy/tests/test_ctypeslib.py,sha256=Fy_dBd80RrBufyeXISkBu6kS3X700qOD5ob0pDjRssg,12276
+numpy/tests/test_ctypeslib.pyc,,
+numpy/tests/test_matlib.py,sha256=WKILeEOe3NdKP_XAy-uCs4VEi7r_ghQ7NUhIgH1LzoM,2158
+numpy/tests/test_matlib.pyc,,
+numpy/tests/test_numpy_version.py,sha256=VtTTZAPnsJ8xtKLy1qYqIwrpcjTtqJ9xP9qP5-p8DbU,647
+numpy/tests/test_numpy_version.pyc,,
+numpy/tests/test_public_api.py,sha256=CmikwegnRWXrMO7vvsZ4Z8O8bwDsfuq5tfOWO-0ccs8,3457
+numpy/tests/test_public_api.pyc,,
+numpy/tests/test_reloading.py,sha256=7sDoPGkvvZas7FhT4oOURt75A5JAcr_Ws2uoJ9cVMiY,1304
+numpy/tests/test_reloading.pyc,,
+numpy/tests/test_scripts.py,sha256=SxlQPb8EttfP4V5iGJyXMBtDWTS3EcYVBN-JWDTtSy4,1637
+numpy/tests/test_scripts.pyc,,
+numpy/tests/test_warnings.py,sha256=ye4TBGnOuPAZyu5bS5JDxYV5hLglUQQfKSrMWwY_phI,2594
+numpy/tests/test_warnings.pyc,,
+numpy/version.py,sha256=5fSzfrOy5GglDQLyQSQBrK0HuGT2cGqu3DYSwiL72Hw,294
+numpy/version.pyc,,
diff --git a/project/venv/lib/python2.7/site-packages/numpy-1.16.2.dist-info/WHEEL b/project/venv/lib/python2.7/site-packages/numpy-1.16.2.dist-info/WHEEL
new file mode 100644
index 0000000..295a0ca
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy-1.16.2.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.31.1)
+Root-Is-Purelib: false
+Tag: cp27-cp27mu-manylinux1_x86_64
+
diff --git a/project/venv/lib/python2.7/site-packages/numpy-1.16.2.dist-info/entry_points.txt b/project/venv/lib/python2.7/site-packages/numpy-1.16.2.dist-info/entry_points.txt
new file mode 100644
index 0000000..bddf93b
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy-1.16.2.dist-info/entry_points.txt
@@ -0,0 +1,5 @@
+[console_scripts]
+f2py = numpy.f2py.f2py2e:main
+f2py2 = numpy.f2py.f2py2e:main
+f2py2.7 = numpy.f2py.f2py2e:main
+
diff --git a/project/venv/lib/python2.7/site-packages/numpy-1.16.2.dist-info/top_level.txt b/project/venv/lib/python2.7/site-packages/numpy-1.16.2.dist-info/top_level.txt
new file mode 100644
index 0000000..24ce15a
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy-1.16.2.dist-info/top_level.txt
@@ -0,0 +1 @@
+numpy
diff --git a/project/venv/lib/python2.7/site-packages/numpy/.libs/libgfortran-ed201abd.so.3.0.0 b/project/venv/lib/python2.7/site-packages/numpy/.libs/libgfortran-ed201abd.so.3.0.0
new file mode 100755
index 0000000..ea741fc
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/.libs/libgfortran-ed201abd.so.3.0.0 differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/.libs/libopenblasp-r0-382c8f3a.3.5.dev.so b/project/venv/lib/python2.7/site-packages/numpy/.libs/libopenblasp-r0-382c8f3a.3.5.dev.so
new file mode 100755
index 0000000..a12ec70
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/.libs/libopenblasp-r0-382c8f3a.3.5.dev.so differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/LICENSE.txt b/project/venv/lib/python2.7/site-packages/numpy/LICENSE.txt
new file mode 100644
index 0000000..8e6cc62
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/LICENSE.txt
@@ -0,0 +1,940 @@
+Copyright (c) 2005-2019, NumPy Developers.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+
+ * Neither the name of the NumPy Developers nor the names of any
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+
+The NumPy repository and source distributions bundle several libraries that are
+compatibly licensed. We list these here.
+
+Name: Numpydoc
+Files: doc/sphinxext/numpydoc/*
+License: 2-clause BSD
+ For details, see doc/sphinxext/LICENSE.txt
+
+Name: scipy-sphinx-theme
+Files: doc/scipy-sphinx-theme/*
+License: 3-clause BSD, PSF and Apache 2.0
+ For details, see doc/scipy-sphinx-theme/LICENSE.txt
+
+Name: lapack-lite
+Files: numpy/linalg/lapack_lite/*
+License: 3-clause BSD
+ For details, see numpy/linalg/lapack_lite/LICENSE.txt
+
+Name: tempita
+Files: tools/npy_tempita/*
+License: BSD derived
+ For details, see tools/npy_tempita/license.txt
+
+Name: dragon4
+Files: numpy/core/src/multiarray/dragon4.c
+License: One of a kind
+ For license text, see numpy/core/src/multiarray/dragon4.c
+
+----
+
+This binary distribution of NumPy also bundles the following software:
+
+
+Name: OpenBLAS
+Files: .libs/libopenb*.so
+Description: bundled as a dynamically linked library
+Availability: https://github.com/xianyi/OpenBLAS/
+License: 3-clause BSD
+ Copyright (c) 2011-2014, The OpenBLAS Project
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ 3. Neither the name of the OpenBLAS project nor the names of
+ its contributors may be used to endorse or promote products
+ derived from this software without specific prior written
+ permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+Name: LAPACK
+Files: .libs/libopenb*.so
+Description: bundled in OpenBLAS
+Availability: https://github.com/xianyi/OpenBLAS/
+License: 3-clause BSD
+ Copyright (c) 1992-2013 The University of Tennessee and The University
+ of Tennessee Research Foundation. All rights
+ reserved.
+ Copyright (c) 2000-2013 The University of California Berkeley. All
+ rights reserved.
+ Copyright (c) 2006-2013 The University of Colorado Denver. All rights
+ reserved.
+
+ $COPYRIGHT$
+
+ Additional copyrights may follow
+
+ $HEADER$
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ - Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ - Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer listed
+ in this license in the documentation and/or other materials
+ provided with the distribution.
+
+ - Neither the name of the copyright holders nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ The copyright holders provide no reassurances that the source code
+ provided does not infringe any patent, copyright, or any other
+ intellectual property rights of third parties. The copyright holders
+ disclaim any liability to any recipient for claims brought against
+ recipient by any third party for infringement of that parties
+ intellectual property rights.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+Name: GCC runtime library
+Files: .libs/libgfortran*.so
+Description: dynamically linked to files compiled with gcc
+Availability: https://gcc.gnu.org/viewcvs/gcc/
+License: GPLv3 + runtime exception
+ Copyright (C) 2002-2017 Free Software Foundation, Inc.
+
+ Libgfortran is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ Libgfortran is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>.
+
+----
+
+Full text of license texts referred to above follows (that they are
+listed below does not necessarily imply the conditions apply to the
+present binary release):
+
+----
+
+GCC RUNTIME LIBRARY EXCEPTION
+
+Version 3.1, 31 March 2009
+
+Copyright (C) 2009 Free Software Foundation, Inc. <http://fsf.org/>
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+This GCC Runtime Library Exception ("Exception") is an additional
+permission under section 7 of the GNU General Public License, version
+3 ("GPLv3"). It applies to a given file (the "Runtime Library") that
+bears a notice placed by the copyright holder of the file stating that
+the file is governed by GPLv3 along with this Exception.
+
+When you use GCC to compile a program, GCC may combine portions of
+certain GCC header files and runtime libraries with the compiled
+program. The purpose of this Exception is to allow compilation of
+non-GPL (including proprietary) programs to use, in this way, the
+header files and runtime libraries covered by this Exception.
+
+0. Definitions.
+
+A file is an "Independent Module" if it either requires the Runtime
+Library for execution after a Compilation Process, or makes use of an
+interface provided by the Runtime Library, but is not otherwise based
+on the Runtime Library.
+
+"GCC" means a version of the GNU Compiler Collection, with or without
+modifications, governed by version 3 (or a specified later version) of
+the GNU General Public License (GPL) with the option of using any
+subsequent versions published by the FSF.
+
+"GPL-compatible Software" is software whose conditions of propagation,
+modification and use would permit combination with GCC in accord with
+the license of GCC.
+
+"Target Code" refers to output from any compiler for a real or virtual
+target processor architecture, in executable form or suitable for
+input to an assembler, loader, linker and/or execution
+phase. Notwithstanding that, Target Code does not include data in any
+format that is used as a compiler intermediate representation, or used
+for producing a compiler intermediate representation.
+
+The "Compilation Process" transforms code entirely represented in
+non-intermediate languages designed for human-written code, and/or in
+Java Virtual Machine byte code, into Target Code. Thus, for example,
+use of source code generators and preprocessors need not be considered
+part of the Compilation Process, since the Compilation Process can be
+understood as starting with the output of the generators or
+preprocessors.
+
+A Compilation Process is "Eligible" if it is done using GCC, alone or
+with other GPL-compatible software, or if it is done without using any
+work based on GCC. For example, using non-GPL-compatible Software to
+optimize any GCC intermediate representations would not qualify as an
+Eligible Compilation Process.
+
+1. Grant of Additional Permission.
+
+You have permission to propagate a work of Target Code formed by
+combining the Runtime Library with Independent Modules, even if such
+propagation would otherwise violate the terms of GPLv3, provided that
+all Target Code was generated by Eligible Compilation Processes. You
+may then convey such a combination under terms of your choice,
+consistent with the licensing of the Independent Modules.
+
+2. No Weakening of GCC Copyleft.
+
+The availability of this Exception does not imply any general
+presumption that third-party software is unaffected by the copyleft
+requirements of the license of GCC.
+
+----
+
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
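
For illustration, a filled-in copyright line for a hypothetical program (name, year and author invented here, not taken from this project) would read:

    transcribe.py: plays back and describes neighbourhood sound recordings.
    Copyright (C) 2019  A. Example

with the permission notice from the template above pasted underneath.
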
diff --git a/project/venv/lib/python2.7/site-packages/numpy/__config__.py b/project/venv/lib/python2.7/site-packages/numpy/__config__.py
new file mode 100644
index 0000000..adfef3f
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/__config__.py
@@ -0,0 +1,39 @@
+# This file is generated by numpy's setup.py
+# It contains system_info results at the time of building this package.
+__all__ = ["get_info","show"]
+
+
+
+import os
+import sys
+
+extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs')
+
+if sys.platform == 'win32' and os.path.isdir(extra_dll_dir):
+ os.environ.setdefault('PATH', '')
+ os.environ['PATH'] += os.pathsep + extra_dll_dir
+
+lapack_opt_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]}
+blas_opt_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]}
+blis_info={}
+openblas_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]}
+openblas_lapack_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]}
+lapack_mkl_info={}
+blas_mkl_info={}
+
+def get_info(name):
+ g = globals()
+ return g.get(name, g.get(name + "_info", {}))
+
+def show():
+ for name,info_dict in globals().items():
+ if name[0] == "_" or type(info_dict) is not type({}): continue
+ print(name + ":")
+ if not info_dict:
+ print(" NOT AVAILABLE")
+ for k,v in info_dict.items():
+ v = str(v)
+ if k == "sources" and len(v) > 200:
+ v = v[:60] + " ...\n... " + v[-60:]
+ print(" %s = %s" % (k,v))
+
\ No newline at end of file
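
A quick usage sketch for this generated module — get_info and show are the helpers defined just above, and 'blas_opt' resolves to the blas_opt_info dict recorded at build time:

    >>> import numpy
    >>> numpy.show_config()                      # doctest: +SKIP
    >>> from numpy.__config__ import get_info
    >>> get_info('blas_opt')                     # doctest: +SKIP
    {'libraries': ['openblas', 'openblas'], ...}

numpy re-exports show as numpy.show_config, so the first call works without touching __config__ directly.
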
diff --git a/project/venv/lib/python2.7/site-packages/numpy/__config__.pyc b/project/venv/lib/python2.7/site-packages/numpy/__config__.pyc
new file mode 100644
index 0000000..73697b7
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/__config__.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/__init__.py b/project/venv/lib/python2.7/site-packages/numpy/__init__.py
new file mode 100644
index 0000000..ba88c73
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/__init__.py
@@ -0,0 +1,219 @@
+"""
+NumPy
+=====
+
+Provides
+ 1. An array object of arbitrary homogeneous items
+ 2. Fast mathematical operations over arrays
+ 3. Linear Algebra, Fourier Transforms, Random Number Generation
+
+How to use the documentation
+----------------------------
+Documentation is available in two forms: docstrings provided
+with the code, and a loose standing reference guide, available from
+`the NumPy homepage <https://www.scipy.org>`_.
+
+We recommend exploring the docstrings using
+`IPython <https://ipython.org>`_, an advanced Python shell with
+TAB-completion and introspection capabilities. See below for further
+instructions.
+
+The docstring examples assume that `numpy` has been imported as `np`::
+
+ >>> import numpy as np
+
+Code snippets are indicated by three greater-than signs::
+
+ >>> x = 42
+ >>> x = x + 1
+
+Use the built-in ``help`` function to view a function's docstring::
+
+ >>> help(np.sort)
+ ... # doctest: +SKIP
+
+For some objects, ``np.info(obj)`` may provide additional help. This is
+particularly true if you see the line "Help on ufunc object:" at the top
+of the help() page. Ufuncs are implemented in C, not Python, for speed.
+The native Python help() does not know how to view their help, but our
+np.info() function does.
+
+To search for documents containing a keyword, do::
+
+ >>> np.lookfor('keyword')
+ ... # doctest: +SKIP
+
+General-purpose documents like a glossary and help on the basic concepts
+of numpy are available under the ``doc`` sub-module::
+
+ >>> from numpy import doc
+ >>> help(doc)
+ ... # doctest: +SKIP
+
+Available subpackages
+---------------------
+doc
+ Topical documentation on broadcasting, indexing, etc.
+lib
+ Basic functions used by several sub-packages.
+random
+ Core Random Tools
+linalg
+ Core Linear Algebra Tools
+fft
+ Core FFT routines
+polynomial
+ Polynomial tools
+testing
+ NumPy testing tools
+f2py
+ Fortran to Python Interface Generator.
+distutils
+ Enhancements to distutils with support for
+    Fortran compilers and more.
+
+Utilities
+---------
+test
+ Run numpy unittests
+show_config
+ Show numpy build configuration
+dual
+ Overwrite certain functions with high-performance Scipy tools
+matlib
+ Make everything matrices.
+__version__
+ NumPy version string
+
+Viewing documentation using IPython
+-----------------------------------
+Start IPython with the NumPy profile (``ipython -p numpy``), which will
+import `numpy` under the alias `np`. Then, use the ``cpaste`` command to
+paste examples into the shell. To see which functions are available in
+`numpy`, type ``np.<TAB>`` (where ``<TAB>`` refers to the TAB key), or use
+``np.*cos*?<ENTER>`` (where ``<ENTER>`` refers to the ENTER key) to narrow
+down the list. To view the docstring for a function, use
+``np.cos?<ENTER>`` (to view the docstring) and ``np.cos??<ENTER>`` (to view
+the source code).
+
+Copies vs. in-place operation
+-----------------------------
+Most of the functions in `numpy` return a copy of the array argument
+(e.g., `np.sort`). In-place versions of these functions are often
+available as array methods, i.e. ``x = np.array([1,2,3]); x.sort()``.
+Exceptions to this rule are documented.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import sys
+import warnings
+
+from ._globals import ModuleDeprecationWarning, VisibleDeprecationWarning
+from ._globals import _NoValue
+
+# We first need to detect if we're being called as part of the numpy setup
+# procedure itself in a reliable manner.
+try:
+ __NUMPY_SETUP__
+except NameError:
+ __NUMPY_SETUP__ = False
+
+if __NUMPY_SETUP__:
+ sys.stderr.write('Running from numpy source directory.\n')
+else:
+ try:
+ from numpy.__config__ import show as show_config
+ except ImportError:
+ msg = """Error importing numpy: you should not try to import numpy from
+ its source directory; please exit the numpy source tree, and relaunch
+ your python interpreter from there."""
+ raise ImportError(msg)
+
+ from .version import git_revision as __git_revision__
+ from .version import version as __version__
+
+ __all__ = ['ModuleDeprecationWarning',
+ 'VisibleDeprecationWarning']
+
+ # Allow distributors to run custom init code
+ from . import _distributor_init
+
+ from . import core
+ from .core import *
+ from . import compat
+ from . import lib
+ from .lib import *
+ from . import linalg
+ from . import fft
+ from . import polynomial
+ from . import random
+ from . import ctypeslib
+ from . import ma
+ from . import matrixlib as _mat
+ from .matrixlib import *
+ from .compat import long
+
+ # Make these accessible from numpy name-space
+ # but not imported in from numpy import *
+ if sys.version_info[0] >= 3:
+ from builtins import bool, int, float, complex, object, str
+ unicode = str
+ else:
+ from __builtin__ import bool, int, float, complex, object, unicode, str
+
+ from .core import round, abs, max, min
+ # now that numpy modules are imported, can initialize limits
+ core.getlimits._register_known_types()
+
+ __all__.extend(['__version__', 'show_config'])
+ __all__.extend(core.__all__)
+ __all__.extend(_mat.__all__)
+ __all__.extend(lib.__all__)
+ __all__.extend(['linalg', 'fft', 'random', 'ctypeslib', 'ma'])
+
+ # Filter out Cython harmless warnings
+ warnings.filterwarnings("ignore", message="numpy.dtype size changed")
+ warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
+ warnings.filterwarnings("ignore", message="numpy.ndarray size changed")
+
+ # oldnumeric and numarray were removed in 1.9. In case some packages import
+ # but do not use them, we define them here for backward compatibility.
+ oldnumeric = 'removed'
+ numarray = 'removed'
+
+ # We don't actually use this ourselves anymore, but I'm not 100% sure that
+ # no-one else in the world is using it (though I hope not)
+ from .testing import Tester
+
+ # Pytest testing
+ from numpy._pytesttester import PytestTester
+ test = PytestTester(__name__)
+ del PytestTester
+
+
+ def _sanity_check():
+ """
+ Quick sanity checks for common bugs caused by environment.
+ There are some cases e.g. with wrong BLAS ABI that cause wrong
+ results under specific runtime conditions that are not necessarily
+ achieved during test suite runs, and it is useful to catch those early.
+
+ See https://github.com/numpy/numpy/issues/8577 and other
+ similar bug reports.
+
+ """
+ try:
+ x = ones(2, dtype=float32)
+ if not abs(x.dot(x) - 2.0) < 1e-5:
+ raise AssertionError()
+ except AssertionError:
+ msg = ("The current Numpy installation ({!r}) fails to "
+ "pass simple sanity checks. This can be caused for example "
+ "by incorrect BLAS library being linked in, or by mixing "
+ "package managers (pip, conda, apt, ...). Search closed "
+ "numpy issues for similar problems.")
+ raise RuntimeError(msg.format(__file__))
+
+ _sanity_check()
+ del _sanity_check
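
The _sanity_check above amounts to a one-line BLAS smoke test; a standalone equivalent (assuming numpy itself imports cleanly) would be:

    >>> import numpy as np
    >>> x = np.ones(2, dtype=np.float32)
    >>> bool(abs(x.dot(x) - 2.0) < 1e-5)   # False would point at a broken BLAS
    True
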
diff --git a/project/venv/lib/python2.7/site-packages/numpy/__init__.pyc b/project/venv/lib/python2.7/site-packages/numpy/__init__.pyc
new file mode 100644
index 0000000..310aea8
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/__init__.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/_distributor_init.py b/project/venv/lib/python2.7/site-packages/numpy/_distributor_init.py
new file mode 100644
index 0000000..d893ba3
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/_distributor_init.py
@@ -0,0 +1,10 @@
+""" Distributor init file
+
+Distributors: you can add custom code here to support particular distributions
+of numpy.
+
+For example, this is a good place to put any checks for hardware requirements.
+
+The numpy standard source distribution will not put code in this file, so you
+can safely replace this file with your own version.
+"""
diff --git a/project/venv/lib/python2.7/site-packages/numpy/_distributor_init.pyc b/project/venv/lib/python2.7/site-packages/numpy/_distributor_init.pyc
new file mode 100644
index 0000000..bcdf59a
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/_distributor_init.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/_globals.py b/project/venv/lib/python2.7/site-packages/numpy/_globals.py
new file mode 100644
index 0000000..f5c0761
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/_globals.py
@@ -0,0 +1,81 @@
+"""
+Module defining global singleton classes.
+
+This module raises a RuntimeError if an attempt to reload it is made. In that
+way the identities of the classes defined here are fixed and will remain so
+even if numpy itself is reloaded. In particular, a function like the following
+will still work correctly after numpy is reloaded::
+
+ def foo(arg=np._NoValue):
+ if arg is np._NoValue:
+ ...
+
+That was not the case when the singleton classes were defined in the numpy
+``__init__.py`` file. See gh-7844 for a discussion of the reload problem that
+motivated this module.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+__ALL__ = [
+ 'ModuleDeprecationWarning', 'VisibleDeprecationWarning', '_NoValue'
+ ]
+
+
+# Disallow reloading this module so as to preserve the identities of the
+# classes defined here.
+if '_is_loaded' in globals():
+ raise RuntimeError('Reloading numpy._globals is not allowed')
+_is_loaded = True
+
+
+class ModuleDeprecationWarning(DeprecationWarning):
+ """Module deprecation warning.
+
+ The nose tester turns ordinary Deprecation warnings into test failures.
+ That makes it hard to deprecate whole modules, because they get
+ imported by default. So this is a special Deprecation warning that the
+ nose tester will let pass without making tests fail.
+
+ """
+
+
+ModuleDeprecationWarning.__module__ = 'numpy'
+
+
+class VisibleDeprecationWarning(UserWarning):
+ """Visible deprecation warning.
+
+ By default, python will not show deprecation warnings, so this class
+ can be used when a very visible warning is helpful, for example because
+ the usage is most likely a user bug.
+
+ """
+
+
+VisibleDeprecationWarning.__module__ = 'numpy'
+
+
+class _NoValueType(object):
+ """Special keyword value.
+
+ The instance of this class may be used as the default value assigned to a
+ deprecated keyword in order to check if it has been given a user defined
+ value.
+ """
+ __instance = None
+ def __new__(cls):
+ # ensure that only one instance exists
+ if not cls.__instance:
+ cls.__instance = super(_NoValueType, cls).__new__(cls)
+ return cls.__instance
+
+ # needed for python 2 to preserve identity through a pickle
+ def __reduce__(self):
+ return (self.__class__, ())
+
+ def __repr__(self):
+ return ""
+
+
+_NoValue = _NoValueType()
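
For context, a minimal sketch of the keyword-default pattern _NoValue supports, lifted from the module docstring (my_func is a made-up name):

    >>> import numpy as np
    >>> def my_func(arg=np._NoValue):
    ...     return 'no value passed' if arg is np._NoValue else arg
    >>> my_func()
    'no value passed'
    >>> my_func(3)
    3
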
diff --git a/project/venv/lib/python2.7/site-packages/numpy/_globals.pyc b/project/venv/lib/python2.7/site-packages/numpy/_globals.pyc
new file mode 100644
index 0000000..87566c2
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/_globals.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/_pytesttester.py b/project/venv/lib/python2.7/site-packages/numpy/_pytesttester.py
new file mode 100644
index 0000000..8d1a381
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/_pytesttester.py
@@ -0,0 +1,209 @@
+"""
+Pytest test running.
+
+This module implements the ``test()`` function for NumPy modules. The usual
+boiler plate for doing that is to put the following in the module
+``__init__.py`` file::
+
+ from numpy._pytesttester import PytestTester
+ test = PytestTester(__name__).test
+ del PytestTester
+
+
+Warnings filtering and other runtime settings should be dealt with in the
+``pytest.ini`` file in the numpy repo root. The behavior of the test depends on
+whether or not that file is found as follows:
+
+* ``pytest.ini`` is present (develop mode)
+  All warnings except those explicitly filtered out are raised as errors.
+* ``pytest.ini`` is absent (release mode)
+ DeprecationWarnings and PendingDeprecationWarnings are ignored, other
+ warnings are passed through.
+
+In practice, tests run from the numpy repo are run in develop mode. That
+includes the standard ``python runtests.py`` invocation.
+
+This module is imported by every numpy subpackage, so lies at the top level to
+simplify circular import issues. For the same reason, it contains no numpy
+imports at module scope, instead importing numpy within function calls.
+"""
+from __future__ import division, absolute_import, print_function
+
+import sys
+import os
+
+__all__ = ['PytestTester']
+
+
+
+def _show_numpy_info():
+ import numpy as np
+
+ print("NumPy version %s" % np.__version__)
+ relaxed_strides = np.ones((10, 1), order="C").flags.f_contiguous
+ print("NumPy relaxed strides checking option:", relaxed_strides)
+
+
+class PytestTester(object):
+ """
+ Pytest test runner.
+
+ This class is made available in ``numpy.testing``, and a test function
+ is typically added to a package's __init__.py like so::
+
+ from numpy.testing import PytestTester
+ test = PytestTester(__name__).test
+ del PytestTester
+
+ Calling this test function finds and runs all tests associated with the
+ module and all its sub-modules.
+
+ Attributes
+ ----------
+ module_name : str
+ Full path to the package to test.
+
+ Parameters
+ ----------
+ module_name : module name
+ The name of the module to test.
+
+ """
+ def __init__(self, module_name):
+ self.module_name = module_name
+
+ def __call__(self, label='fast', verbose=1, extra_argv=None,
+ doctests=False, coverage=False, durations=-1, tests=None):
+ """
+ Run tests for module using pytest.
+
+ Parameters
+ ----------
+ label : {'fast', 'full'}, optional
+ Identifies the tests to run. When set to 'fast', tests decorated
+ with `pytest.mark.slow` are skipped, when 'full', the slow marker
+ is ignored.
+ verbose : int, optional
+ Verbosity value for test outputs, in the range 1-3. Default is 1.
+ extra_argv : list, optional
+            List with any extra arguments to pass to pytest.
+ doctests : bool, optional
+ .. note:: Not supported
+ coverage : bool, optional
+ If True, report coverage of NumPy code. Default is False.
+ Requires installation of (pip) pytest-cov.
+ durations : int, optional
+            If < 0, do nothing; if 0, report the time of all tests; if > 0,
+            report the time of the `durations` slowest tests. Default is -1.
+ tests : test or list of tests
+ Tests to be executed with pytest '--pyargs'
+
+ Returns
+ -------
+ result : bool
+            Return True on success, False otherwise.
+
+ Notes
+ -----
+ Each NumPy module exposes `test` in its namespace to run all tests for
+ it. For example, to run all tests for numpy.lib:
+
+ >>> np.lib.test() #doctest: +SKIP
+
+ Examples
+ --------
+ >>> result = np.lib.test() #doctest: +SKIP
+ ...
+ 1023 passed, 2 skipped, 6 deselected, 1 xfailed in 10.39 seconds
+ >>> result
+ True
+
+ """
+ import pytest
+ import warnings
+
+ #FIXME This is no longer needed? Assume it was for use in tests.
+ # cap verbosity at 3, which is equivalent to the pytest '-vv' option
+ #from . import utils
+ #verbose = min(int(verbose), 3)
+ #utils.verbose = verbose
+ #
+
+ module = sys.modules[self.module_name]
+ module_path = os.path.abspath(module.__path__[0])
+
+ # setup the pytest arguments
+ pytest_args = ["-l"]
+
+ # offset verbosity. The "-q" cancels a "-v".
+ pytest_args += ["-q"]
+
+ # Filter out distutils cpu warnings (could be localized to
+ # distutils tests). ASV has problems with top level import,
+ # so fetch module for suppression here.
+ with warnings.catch_warnings():
+ warnings.simplefilter("always")
+ from numpy.distutils import cpuinfo
+
+ # Filter out annoying import messages. Want these in both develop and
+ # release mode.
+ pytest_args += [
+ "-W ignore:Not importing directory",
+ "-W ignore:numpy.dtype size changed",
+ "-W ignore:numpy.ufunc size changed",
+ "-W ignore::UserWarning:cpuinfo",
+ ]
+
+ # When testing matrices, ignore their PendingDeprecationWarnings
+ pytest_args += [
+ "-W ignore:the matrix subclass is not",
+ ]
+
+ # Ignore python2.7 -3 warnings
+ pytest_args += [
+ r"-W ignore:sys\.exc_clear\(\) not supported in 3\.x:DeprecationWarning",
+ r"-W ignore:in 3\.x, __setslice__:DeprecationWarning",
+ r"-W ignore:in 3\.x, __getslice__:DeprecationWarning",
+ r"-W ignore:buffer\(\) not supported in 3\.x:DeprecationWarning",
+ r"-W ignore:CObject type is not supported in 3\.x:DeprecationWarning",
+ r"-W ignore:comparing unequal types not supported in 3\.x:DeprecationWarning",
+ r"-W ignore:the commands module has been removed in Python 3\.0:DeprecationWarning",
+ r"-W ignore:The 'new' module has been removed in Python 3\.0:DeprecationWarning",
+ ]
+
+
+ if doctests:
+ raise ValueError("Doctests not supported")
+
+ if extra_argv:
+ pytest_args += list(extra_argv)
+
+ if verbose > 1:
+ pytest_args += ["-" + "v"*(verbose - 1)]
+
+ if coverage:
+ pytest_args += ["--cov=" + module_path]
+
+ if label == "fast":
+ pytest_args += ["-m", "not slow"]
+ elif label != "full":
+ pytest_args += ["-m", label]
+
+ if durations >= 0:
+ pytest_args += ["--durations=%s" % durations]
+
+ if tests is None:
+ tests = [self.module_name]
+
+ pytest_args += ["--pyargs"] + list(tests)
+
+
+ # run tests.
+ _show_numpy_info()
+
+ try:
+ code = pytest.main(pytest_args)
+ except SystemExit as exc:
+ code = exc.code
+
+ return code == 0
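
In practice the tester is reached through the test attribute each subpackage installs (see numpy/__init__.py above); running it requires pytest:

    >>> import numpy as np
    >>> np.test(label='fast', verbose=2)   # doctest: +SKIP
    True
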
diff --git a/project/venv/lib/python2.7/site-packages/numpy/_pytesttester.pyc b/project/venv/lib/python2.7/site-packages/numpy/_pytesttester.pyc
new file mode 100644
index 0000000..871cade
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/_pytesttester.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/compat/__init__.py b/project/venv/lib/python2.7/site-packages/numpy/compat/__init__.py
new file mode 100644
index 0000000..5b371f5
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/compat/__init__.py
@@ -0,0 +1,20 @@
+"""
+Compatibility module.
+
+This module contains duplicated code from Python itself or 3rd party
+extensions, which may be included for the following reasons:
+
+ * compatibility
+ * we may only need a small subset of the copied library/module
+
+"""
+from __future__ import division, absolute_import, print_function
+
+from . import _inspect
+from . import py3k
+from ._inspect import getargspec, formatargspec
+from .py3k import *
+
+__all__ = []
+__all__.extend(_inspect.__all__)
+__all__.extend(py3k.__all__)
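
A small sketch of the re-exported getargspec on a throwaway function f (the implementation follows in _inspect.py below):

    >>> from numpy.compat import getargspec
    >>> def f(a, b=1, *args, **kw):
    ...     pass
    >>> getargspec(f)
    (['a', 'b'], 'args', 'kw', (1,))
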
diff --git a/project/venv/lib/python2.7/site-packages/numpy/compat/__init__.pyc b/project/venv/lib/python2.7/site-packages/numpy/compat/__init__.pyc
new file mode 100644
index 0000000..49a0de9
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/compat/__init__.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/compat/_inspect.py b/project/venv/lib/python2.7/site-packages/numpy/compat/_inspect.py
new file mode 100644
index 0000000..439d0d2
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/compat/_inspect.py
@@ -0,0 +1,193 @@
+"""Subset of inspect module from upstream python
+
+We use this instead of upstream because upstream inspect is slow to import, and
+significantly contributes to numpy import times. Importing this copy has almost
+no overhead.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import types
+
+__all__ = ['getargspec', 'formatargspec']
+
+# ----------------------------------------------------------- type-checking
+def ismethod(object):
+ """Return true if the object is an instance method.
+
+ Instance method objects provide these attributes:
+ __doc__ documentation string
+ __name__ name with which this method was defined
+ im_class class object in which this method belongs
+ im_func function object containing implementation of method
+ im_self instance to which this method is bound, or None
+
+ """
+ return isinstance(object, types.MethodType)
+
+def isfunction(object):
+ """Return true if the object is a user-defined function.
+
+ Function objects provide these attributes:
+ __doc__ documentation string
+ __name__ name with which this function was defined
+ func_code code object containing compiled function bytecode
+ func_defaults tuple of any default values for arguments
+ func_doc (same as __doc__)
+ func_globals global namespace in which this function was defined
+ func_name (same as __name__)
+
+ """
+ return isinstance(object, types.FunctionType)
+
+def iscode(object):
+ """Return true if the object is a code object.
+
+ Code objects provide these attributes:
+ co_argcount number of arguments (not including * or ** args)
+ co_code string of raw compiled bytecode
+ co_consts tuple of constants used in the bytecode
+ co_filename name of file in which this code object was created
+ co_firstlineno number of first line in Python source code
+ co_flags bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg
+ co_lnotab encoded mapping of line numbers to bytecode indices
+ co_name name with which this code object was defined
+ co_names tuple of names of local variables
+ co_nlocals number of local variables
+ co_stacksize virtual machine stack space required
+ co_varnames tuple of names of arguments and local variables
+
+ """
+ return isinstance(object, types.CodeType)
+
+# ------------------------------------------------ argument list extraction
+# These constants are from Python's compile.h.
+CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 1, 2, 4, 8
+
+def getargs(co):
+ """Get information about the arguments accepted by a code object.
+
+ Three things are returned: (args, varargs, varkw), where 'args' is
+ a list of argument names (possibly containing nested lists), and
+ 'varargs' and 'varkw' are the names of the * and ** arguments or None.
+
+ """
+
+ if not iscode(co):
+ raise TypeError('arg is not a code object')
+
+ nargs = co.co_argcount
+ names = co.co_varnames
+ args = list(names[:nargs])
+
+ # The following acrobatics are for anonymous (tuple) arguments.
+ # Which we do not need to support, so remove to avoid importing
+ # the dis module.
+ for i in range(nargs):
+ if args[i][:1] in ['', '.']:
+ raise TypeError("tuple function arguments are not supported")
+ varargs = None
+ if co.co_flags & CO_VARARGS:
+ varargs = co.co_varnames[nargs]
+ nargs = nargs + 1
+ varkw = None
+ if co.co_flags & CO_VARKEYWORDS:
+ varkw = co.co_varnames[nargs]
+ return args, varargs, varkw
+
+def getargspec(func):
+ """Get the names and default values of a function's arguments.
+
+ A tuple of four things is returned: (args, varargs, varkw, defaults).
+ 'args' is a list of the argument names (it may contain nested lists).
+ 'varargs' and 'varkw' are the names of the * and ** arguments or None.
+ 'defaults' is an n-tuple of the default values of the last n arguments.
+
+ """
+
+ if ismethod(func):
+ func = func.__func__
+ if not isfunction(func):
+ raise TypeError('arg is not a Python function')
+ args, varargs, varkw = getargs(func.__code__)
+ return args, varargs, varkw, func.__defaults__
+
+def getargvalues(frame):
+ """Get information about arguments passed into a particular frame.
+
+ A tuple of four things is returned: (args, varargs, varkw, locals).
+ 'args' is a list of the argument names (it may contain nested lists).
+ 'varargs' and 'varkw' are the names of the * and ** arguments or None.
+ 'locals' is the locals dictionary of the given frame.
+
+ """
+ args, varargs, varkw = getargs(frame.f_code)
+ return args, varargs, varkw, frame.f_locals
+
+def joinseq(seq):
+ if len(seq) == 1:
+ return '(' + seq[0] + ',)'
+ else:
+ return '(' + ', '.join(seq) + ')'
+
+def strseq(object, convert, join=joinseq):
+ """Recursively walk a sequence, stringifying each element.
+
+ """
+ if type(object) in [list, tuple]:
+ return join([strseq(_o, convert, join) for _o in object])
+ else:
+ return convert(object)
+
+def formatargspec(args, varargs=None, varkw=None, defaults=None,
+ formatarg=str,
+ formatvarargs=lambda name: '*' + name,
+ formatvarkw=lambda name: '**' + name,
+ formatvalue=lambda value: '=' + repr(value),
+ join=joinseq):
+ """Format an argument spec from the 4 values returned by getargspec.
+
+ The first four arguments are (args, varargs, varkw, defaults). The
+ other four arguments are the corresponding optional formatting functions
+ that are called to turn names and values into strings. The ninth
+ argument is an optional function to format the sequence of arguments.
+
+ """
+ specs = []
+ if defaults:
+ firstdefault = len(args) - len(defaults)
+ for i in range(len(args)):
+ spec = strseq(args[i], formatarg, join)
+ if defaults and i >= firstdefault:
+ spec = spec + formatvalue(defaults[i - firstdefault])
+ specs.append(spec)
+ if varargs is not None:
+ specs.append(formatvarargs(varargs))
+ if varkw is not None:
+ specs.append(formatvarkw(varkw))
+ return '(' + ', '.join(specs) + ')'
+
+def formatargvalues(args, varargs, varkw, locals,
+ formatarg=str,
+ formatvarargs=lambda name: '*' + name,
+ formatvarkw=lambda name: '**' + name,
+ formatvalue=lambda value: '=' + repr(value),
+ join=joinseq):
+ """Format an argument spec from the 4 values returned by getargvalues.
+
+ The first four arguments are (args, varargs, varkw, locals). The
+ next four arguments are the corresponding optional formatting functions
+ that are called to turn names and values into strings. The ninth
+ argument is an optional function to format the sequence of arguments.
+
+ """
+ def convert(name, locals=locals,
+ formatarg=formatarg, formatvalue=formatvalue):
+ return formatarg(name) + formatvalue(locals[name])
+ specs = [strseq(arg, convert, join) for arg in args]
+
+ if varargs:
+ specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
+ if varkw:
+ specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
+ return '(' + ', '.join(specs) + ')'
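
Since this is meant as a behavior-preserving subset of the stdlib module, a quick consistency check against `inspect` is possible (editor's sketch; note `inspect.getargspec` itself was removed in Python 3.11, which is part of why this copy exists):

import inspect
from numpy.compat import _inspect

def g(x, y=2, *rest):
    return x

# The trimmed copy agrees with the stdlib for plain functions:
assert _inspect.getargspec(g) == tuple(inspect.getargspec(g))
print(_inspect.formatargspec(*_inspect.getargspec(g)))  # (x, y=2, *rest)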
diff --git a/project/venv/lib/python2.7/site-packages/numpy/compat/_inspect.pyc b/project/venv/lib/python2.7/site-packages/numpy/compat/_inspect.pyc
new file mode 100644
index 0000000..c453d4d
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/compat/_inspect.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/compat/py3k.py b/project/venv/lib/python2.7/site-packages/numpy/compat/py3k.py
new file mode 100644
index 0000000..0672927
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/compat/py3k.py
@@ -0,0 +1,243 @@
+"""
+Python 3 compatibility tools.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+__all__ = ['bytes', 'asbytes', 'isfileobj', 'getexception', 'strchar',
+ 'unicode', 'asunicode', 'asbytes_nested', 'asunicode_nested',
+ 'asstr', 'open_latin1', 'long', 'basestring', 'sixu',
+ 'integer_types', 'is_pathlib_path', 'npy_load_module', 'Path',
+ 'contextlib_nullcontext', 'os_fspath', 'os_PathLike']
+
+import sys
+try:
+ from pathlib import Path, PurePath
+except ImportError:
+ Path = PurePath = None
+
+if sys.version_info[0] >= 3:
+ import io
+
+ long = int
+ integer_types = (int,)
+ basestring = str
+ unicode = str
+ bytes = bytes
+
+ def asunicode(s):
+ if isinstance(s, bytes):
+ return s.decode('latin1')
+ return str(s)
+
+ def asbytes(s):
+ if isinstance(s, bytes):
+ return s
+ return str(s).encode('latin1')
+
+ def asstr(s):
+ if isinstance(s, bytes):
+ return s.decode('latin1')
+ return str(s)
+
+ def isfileobj(f):
+ return isinstance(f, (io.FileIO, io.BufferedReader, io.BufferedWriter))
+
+ def open_latin1(filename, mode='r'):
+ return open(filename, mode=mode, encoding='iso-8859-1')
+
+ def sixu(s):
+ return s
+
+ strchar = 'U'
+
+
+else:
+ bytes = str
+ long = long
+ basestring = basestring
+ unicode = unicode
+ integer_types = (int, long)
+ asbytes = str
+ asstr = str
+ strchar = 'S'
+
+ def isfileobj(f):
+ return isinstance(f, file)
+
+ def asunicode(s):
+ if isinstance(s, unicode):
+ return s
+ return str(s).decode('ascii')
+
+ def open_latin1(filename, mode='r'):
+ return open(filename, mode=mode)
+
+ def sixu(s):
+ return unicode(s, 'unicode_escape')
+
+
+def getexception():
+ return sys.exc_info()[1]
+
+def asbytes_nested(x):
+ if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)):
+ return [asbytes_nested(y) for y in x]
+ else:
+ return asbytes(x)
+
+def asunicode_nested(x):
+ if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)):
+ return [asunicode_nested(y) for y in x]
+ else:
+ return asunicode(x)
+
+def is_pathlib_path(obj):
+ """
+ Check whether obj is a pathlib.Path object.
+
+ Prefer using `isinstance(obj, os_PathLike)` instead of this function.
+ """
+ return Path is not None and isinstance(obj, Path)
+
+# from Python 3.7
+class contextlib_nullcontext(object):
+ """Context manager that does no additional processing.
+
+ Used as a stand-in for a normal context manager, when a particular
+ block of code is only sometimes used with a normal context manager:
+
+ cm = optional_cm if condition else nullcontext()
+ with cm:
+ # Perform operation, using optional_cm if condition is True
+ """
+
+ def __init__(self, enter_result=None):
+ self.enter_result = enter_result
+
+ def __enter__(self):
+ return self.enter_result
+
+ def __exit__(self, *excinfo):
+ pass
+
+
+if sys.version_info[0] >= 3 and sys.version_info[1] >= 4:
+ def npy_load_module(name, fn, info=None):
+ """
+ Load a module.
+
+ .. versionadded:: 1.11.2
+
+ Parameters
+ ----------
+ name : str
+ Full module name.
+ fn : str
+ Path to module file.
+ info : tuple, optional
+ Only here for backward compatibility with Python 2.*.
+
+ Returns
+ -------
+ mod : module
+
+ """
+ import importlib.machinery
+ return importlib.machinery.SourceFileLoader(name, fn).load_module()
+else:
+ def npy_load_module(name, fn, info=None):
+ """
+ Load a module.
+
+ .. versionadded:: 1.11.2
+
+ Parameters
+ ----------
+ name : str
+ Full module name.
+ fn : str
+ Path to module file.
+ info : tuple, optional
+ Information as returned by `imp.find_module`
+ (suffix, mode, type).
+
+ Returns
+ -------
+ mod : module
+
+ """
+ import imp
+ import os
+ if info is None:
+ path = os.path.dirname(fn)
+ fo, fn, info = imp.find_module(name, [path])
+ else:
+ fo = open(fn, info[1])
+ try:
+ mod = imp.load_module(name, fo, fn, info)
+ finally:
+ fo.close()
+ return mod
+
+# backport abc.ABC
+import abc
+if sys.version_info[:2] >= (3, 4):
+ abc_ABC = abc.ABC
+else:
+ abc_ABC = abc.ABCMeta('ABC', (object,), {'__slots__': ()})
+
+
+# Backport os.fs_path, os.PathLike, and PurePath.__fspath__
+if sys.version_info[:2] >= (3, 6):
+ import os
+ os_fspath = os.fspath
+ os_PathLike = os.PathLike
+else:
+ def _PurePath__fspath__(self):
+ return str(self)
+
+ class os_PathLike(abc_ABC):
+ """Abstract base class for implementing the file system path protocol."""
+
+ @abc.abstractmethod
+ def __fspath__(self):
+ """Return the file system path representation of the object."""
+ raise NotImplementedError
+
+ @classmethod
+ def __subclasshook__(cls, subclass):
+ if PurePath is not None and issubclass(subclass, PurePath):
+ return True
+ return hasattr(subclass, '__fspath__')
+
+
+ def os_fspath(path):
+ """Return the path representation of a path-like object.
+ If str or bytes is passed in, it is returned unchanged. Otherwise the
+ os.PathLike interface is used to get the path representation. If the
+ path representation is not str or bytes, TypeError is raised. If the
+ provided path is not str, bytes, or os.PathLike, TypeError is raised.
+ """
+ if isinstance(path, (unicode, bytes)):
+ return path
+
+ # Work from the object's type to match method resolution of other magic
+ # methods.
+ path_type = type(path)
+ try:
+ path_repr = path_type.__fspath__(path)
+ except AttributeError:
+ if hasattr(path_type, '__fspath__'):
+ raise
+ elif PurePath is not None and issubclass(path_type, PurePath):
+ return _PurePath__fspath__(path)
+ else:
+ raise TypeError("expected str, bytes or os.PathLike object, "
+ "not " + path_type.__name__)
+ if isinstance(path_repr, (unicode, bytes)):
+ return path_repr
+ else:
+ raise TypeError("expected {}.__fspath__() to return str or bytes, "
+ "not {}".format(path_type.__name__,
+ type(path_repr).__name__))
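
A short sketch of the path-protocol backport in action (editor's example; the behavior is the same whether the stdlib `os.fspath` or the fallback above gets picked):

from numpy.compat import os_fspath, is_pathlib_path

print(os_fspath('/tmp/a'))    # str in, str out, unchanged
print(os_fspath(b'/tmp/a'))   # bytes are passed through as well

try:
    from pathlib import Path
    print(os_fspath(Path('/tmp/a')))        # '/tmp/a' via __fspath__
    print(is_pathlib_path(Path('/tmp/a')))  # True
except ImportError:
    pass  # pathlib may be absent on a bare Python 2

try:
    os_fspath(123)
except TypeError as exc:
    print(exc)  # expected str, bytes or os.PathLike object, not int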
diff --git a/project/venv/lib/python2.7/site-packages/numpy/compat/py3k.pyc b/project/venv/lib/python2.7/site-packages/numpy/compat/py3k.pyc
new file mode 100644
index 0000000..94e7c18
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/compat/py3k.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/compat/setup.py b/project/venv/lib/python2.7/site-packages/numpy/compat/setup.py
new file mode 100644
index 0000000..8828574
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/compat/setup.py
@@ -0,0 +1,12 @@
+from __future__ import division, print_function
+
+def configuration(parent_package='',top_path=None):
+ from numpy.distutils.misc_util import Configuration
+
+ config = Configuration('compat', parent_package, top_path)
+ config.add_data_dir('tests')
+ return config
+
+if __name__ == '__main__':
+ from numpy.distutils.core import setup
+ setup(configuration=configuration)
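
This per-package setup file is not normally run on its own; numpy.distutils collects it from a parent configuration. A hedged sketch of how a parent package would pick it up (`mypkg` is illustrative, not from this diff):

from numpy.distutils.misc_util import Configuration

def configuration(parent_package='', top_path=None):
    config = Configuration('mypkg', parent_package, top_path)
    # add_subpackage() finds and executes the compat/setup.py shown above
    config.add_subpackage('compat')
    return config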
diff --git a/project/venv/lib/python2.7/site-packages/numpy/compat/setup.pyc b/project/venv/lib/python2.7/site-packages/numpy/compat/setup.pyc
new file mode 100644
index 0000000..20a6b64
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/compat/setup.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/compat/tests/__init__.py b/project/venv/lib/python2.7/site-packages/numpy/compat/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/project/venv/lib/python2.7/site-packages/numpy/compat/tests/__init__.pyc b/project/venv/lib/python2.7/site-packages/numpy/compat/tests/__init__.pyc
new file mode 100644
index 0000000..1d7fa18
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/compat/tests/__init__.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/compat/tests/test_compat.py b/project/venv/lib/python2.7/site-packages/numpy/compat/tests/test_compat.py
new file mode 100644
index 0000000..9bb316a
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/compat/tests/test_compat.py
@@ -0,0 +1,26 @@
+from __future__ import division, absolute_import, print_function
+
+from os.path import join
+
+from numpy.compat import isfileobj, os_fspath
+from numpy.testing import assert_
+from numpy.testing import tempdir
+
+
+def test_isfileobj():
+ with tempdir(prefix="numpy_test_compat_") as folder:
+ filename = join(folder, 'a.bin')
+
+ with open(filename, 'wb') as f:
+ assert_(isfileobj(f))
+
+ with open(filename, 'ab') as f:
+ assert_(isfileobj(f))
+
+ with open(filename, 'rb') as f:
+ assert_(isfileobj(f))
+
+
+def test_os_fspath_strings():
+ for string_path in (b'/a/b/c.d', u'/a/b/c.d'):
+ assert_(os_fspath(string_path) == string_path)
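
These tests are ordinary pytest functions; a sketch of invoking them through numpy's own test wrapper (assumes numpy and pytest are importable in the venv):

import numpy
# Run just the compat tests via the PytestTester hook, filtering by name
numpy.test(extra_argv=['-k', 'test_compat'])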
diff --git a/project/venv/lib/python2.7/site-packages/numpy/compat/tests/test_compat.pyc b/project/venv/lib/python2.7/site-packages/numpy/compat/tests/test_compat.pyc
new file mode 100644
index 0000000..74cb905
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/compat/tests/test_compat.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/conftest.py b/project/venv/lib/python2.7/site-packages/numpy/conftest.py
new file mode 100644
index 0000000..4d4d055
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/conftest.py
@@ -0,0 +1,60 @@
+"""
+Pytest configuration and fixtures for the Numpy test suite.
+"""
+from __future__ import division, absolute_import, print_function
+
+import pytest
+import numpy
+
+from numpy.core._multiarray_tests import get_fpu_mode
+
+
+_old_fpu_mode = None
+_collect_results = {}
+
+
+#FIXME when yield tests are gone.
+@pytest.hookimpl()
+def pytest_itemcollected(item):
+ """
+ Check FPU precision mode was not changed during test collection.
+
+ The clumsy way we do it here is mainly necessary because numpy
+ still uses yield tests, which can execute code at test collection
+ time.
+ """
+ global _old_fpu_mode
+
+ mode = get_fpu_mode()
+
+ if _old_fpu_mode is None:
+ _old_fpu_mode = mode
+ elif mode != _old_fpu_mode:
+ _collect_results[item] = (_old_fpu_mode, mode)
+ _old_fpu_mode = mode
+
+
+@pytest.fixture(scope="function", autouse=True)
+def check_fpu_mode(request):
+ """
+ Check FPU precision mode was not changed during the test.
+ """
+ old_mode = get_fpu_mode()
+ yield
+ new_mode = get_fpu_mode()
+
+ if old_mode != new_mode:
+ raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}"
+ " during the test".format(old_mode, new_mode))
+
+ collect_result = _collect_results.get(request.node)
+ if collect_result is not None:
+ old_mode, new_mode = collect_result
+ raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}"
+ " when collecting the test".format(old_mode,
+ new_mode))
+
+
+@pytest.fixture(autouse=True)
+def add_np(doctest_namespace):
+ doctest_namespace['np'] = numpy
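
The `add_np` fixture above is what lets numpy's doctests use `np` without an explicit import. A sketch of a doctest that relies on it when collected under this conftest (editor's example; run with `pytest --doctest-modules`):

def reciprocal(x):
    """
    >>> np.allclose(reciprocal(np.array([1.0, 2.0])), [1.0, 0.5])
    True
    """
    return 1.0 / x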
diff --git a/project/venv/lib/python2.7/site-packages/numpy/conftest.pyc b/project/venv/lib/python2.7/site-packages/numpy/conftest.pyc
new file mode 100644
index 0000000..88ac4a8
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/conftest.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/__init__.py b/project/venv/lib/python2.7/site-packages/numpy/core/__init__.py
new file mode 100644
index 0000000..c6a4e93
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/__init__.py
@@ -0,0 +1,171 @@
+from __future__ import division, absolute_import, print_function
+
+from .info import __doc__
+from numpy.version import version as __version__
+
+import os
+
+# on Windows NumPy loads an important OpenBLAS-related DLL
+# and the code below aims to alleviate issues with DLL
+# path resolution portability with an absolute path DLL load
+if os.name == 'nt':
+ from ctypes import WinDLL
+ import glob
+ # convention for storing / loading the DLL from
+ # numpy/.libs/, if present
+ libs_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
+ '..', '.libs'))
+ DLL_filenames = []
+ if os.path.isdir(libs_path):
+ for filename in glob.glob(os.path.join(libs_path, '*openblas*dll')):
+ # NOTE: would it change behavior to load ALL
+ # DLLs at this path vs. the name restriction?
+ WinDLL(os.path.abspath(filename))
+ DLL_filenames.append(filename)
+ if len(DLL_filenames) > 1:
+ import warnings
+ warnings.warn("loaded more than 1 DLL from .libs:\n%s" %
+ "\n".join(DLL_filenames),
+ stacklevel=1)
+
+# disables OpenBLAS affinity setting of the main thread that limits
+# python threads or processes to one core
+env_added = []
+for envkey in ['OPENBLAS_MAIN_FREE', 'GOTOBLAS_MAIN_FREE']:
+ if envkey not in os.environ:
+ os.environ[envkey] = '1'
+ env_added.append(envkey)
+
+try:
+ from . import multiarray
+except ImportError as exc:
+ import sys
+ msg = """
+
+IMPORTANT: PLEASE READ THIS FOR ADVICE ON HOW TO SOLVE THIS ISSUE!
+
+Importing the multiarray numpy extension module failed. Most
+likely you are trying to import a failed build of numpy.
+Here is how to proceed:
+- If you're working with a numpy git repository, try `git clean -xdf`
+ (removes all files not under version control) and rebuild numpy.
+- If you are simply trying to use the numpy version that you have installed:
+ your installation is broken - please reinstall numpy.
+- If you have already reinstalled and that did not fix the problem, then:
+ 1. Check that you are using the Python you expect (you're using %s),
+ and that you have no directories in your PATH or PYTHONPATH that can
+ interfere with the Python and numpy versions you're trying to use.
+ 2. If (1) looks fine, you can open a new issue at
+ https://github.com/numpy/numpy/issues. Please include details on:
+ - how you installed Python
+ - how you installed numpy
+ - your operating system
+ - whether or not you have multiple versions of Python installed
+ - if you built from source, your compiler versions and ideally a build log
+
+ Note: this error has many possible causes, so please don't comment on
+ an existing issue about this - open a new one instead.
+
+Original error was: %s
+""" % (sys.executable, exc)
+ raise ImportError(msg)
+finally:
+ for envkey in env_added:
+ del os.environ[envkey]
+del envkey
+del env_added
+del os
+
+from . import umath
+
+# Check that multiarray,umath are pure python modules wrapping
+# _multiarray_umath and not either of the old c-extension modules
+if not (hasattr(multiarray, '_multiarray_umath') and
+ hasattr(umath, '_multiarray_umath')):
+ import sys
+ path = sys.modules['numpy'].__path__
+ msg = ("Something is wrong with the numpy installation. "
+ "While importing we detected an older version of "
+ "numpy in {}. One method of fixing this is to repeatedly uninstall "
+ "numpy until none is found, then reinstall this version.")
+ raise ImportError(msg.format(path))
+
+from . import numerictypes as nt
+multiarray.set_typeDict(nt.sctypeDict)
+from . import numeric
+from .numeric import *
+from . import fromnumeric
+from .fromnumeric import *
+from . import defchararray as char
+from . import records as rec
+from .records import *
+from .memmap import *
+from .defchararray import chararray
+from . import function_base
+from .function_base import *
+from . import machar
+from .machar import *
+from . import getlimits
+from .getlimits import *
+from . import shape_base
+from .shape_base import *
+from . import einsumfunc
+from .einsumfunc import *
+del nt
+
+from .fromnumeric import amax as max, amin as min, round_ as round
+from .numeric import absolute as abs
+
+# do this after everything else, to minimize the chance of this misleadingly
+# appearing in an import-time traceback
+from . import _add_newdocs
+# add these for module-freeze analysis (like PyInstaller)
+from . import _dtype_ctypes
+from . import _internal
+from . import _dtype
+from . import _methods
+
+__all__ = ['char', 'rec', 'memmap']
+__all__ += numeric.__all__
+__all__ += fromnumeric.__all__
+__all__ += rec.__all__
+__all__ += ['chararray']
+__all__ += function_base.__all__
+__all__ += machar.__all__
+__all__ += getlimits.__all__
+__all__ += shape_base.__all__
+__all__ += einsumfunc.__all__
+
+# Make it possible so that ufuncs can be pickled
+# Here are the loading and unloading functions
+# The name numpy.core._ufunc_reconstruct must be
+# available for unpickling to work.
+def _ufunc_reconstruct(module, name):
+ # The `fromlist` kwarg is required to ensure that `mod` points to the
+ # inner-most module rather than the parent package when module name is
+ # nested. This makes it possible to pickle non-toplevel ufuncs such as
+ # scipy.special.expit for instance.
+ mod = __import__(module, fromlist=[name])
+ return getattr(mod, name)
+
+def _ufunc_reduce(func):
+ from pickle import whichmodule
+ name = func.__name__
+ return _ufunc_reconstruct, (whichmodule(func, name), name)
+
+
+import sys
+if sys.version_info[0] >= 3:
+ import copyreg
+else:
+ import copy_reg as copyreg
+
+copyreg.pickle(ufunc, _ufunc_reduce, _ufunc_reconstruct)
+# Unclutter namespace (must keep _ufunc_reconstruct for unpickling)
+del copyreg
+del sys
+del _ufunc_reduce
+
+from numpy._pytesttester import PytestTester
+test = PytestTester(__name__)
+del PytestTester
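
The `copyreg.pickle` registration above is the whole ufunc-pickling story: pickling stores only the defining module and name, and `_ufunc_reconstruct` re-imports them on load. A minimal round-trip (editor's sketch):

import pickle
import numpy as np

s = pickle.dumps(np.add)
assert pickle.loads(s) is np.add  # the same ufunc singleton comes back
# The fromlist trick in _ufunc_reconstruct is what lets nested ufuncs
# such as scipy.special.expit round-trip as well, when scipy is present.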
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/__init__.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/__init__.pyc
new file mode 100644
index 0000000..d3cd51b
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/__init__.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_add_newdocs.py b/project/venv/lib/python2.7/site-packages/numpy/core/_add_newdocs.py
new file mode 100644
index 0000000..a242a74
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/_add_newdocs.py
@@ -0,0 +1,6936 @@
+"""
+This is only meant to add docs to objects defined in C-extension modules.
+The purpose is to allow easier editing of the docstrings without
+requiring a re-compile.
+
+NOTE: Many of the methods of ndarray have corresponding functions.
+ If you update these docstrings, please keep also the ones in
+ core/fromnumeric.py, core/defmatrix.py up-to-date.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+from numpy.core import numerictypes as _numerictypes
+from numpy.core import dtype
+from numpy.core.function_base import add_newdoc
+
+###############################################################################
+#
+# flatiter
+#
+# flatiter needs a toplevel description
+#
+###############################################################################
+
+add_newdoc('numpy.core', 'flatiter',
+ """
+ Flat iterator object to iterate over arrays.
+
+ A `flatiter` iterator is returned by ``x.flat`` for any array `x`.
+ It allows iterating over the array as if it were a 1-D array,
+ either in a for-loop or by calling its `next` method.
+
+ Iteration is done in row-major, C-style order (the last
+ index varying the fastest). The iterator can also be indexed using
+ basic slicing or advanced indexing.
+
+ See Also
+ --------
+ ndarray.flat : Return a flat iterator over an array.
+ ndarray.flatten : Returns a flattened copy of an array.
+
+ Notes
+ -----
+ A `flatiter` iterator cannot be constructed directly from Python code
+ by calling the `flatiter` constructor.
+
+ Examples
+ --------
+ >>> x = np.arange(6).reshape(2, 3)
+ >>> fl = x.flat
+ >>> type(fl)
+ <type 'numpy.flatiter'>
+ >>> for item in fl:
+ ... print(item)
+ ...
+ 0
+ 1
+ 2
+ 3
+ 4
+ 5
+
+ >>> fl[2:4]
+ array([2, 3])
+
+ """)
+
+# flatiter attributes
+
+add_newdoc('numpy.core', 'flatiter', ('base',
+ """
+ A reference to the array that is iterated over.
+
+ Examples
+ --------
+ >>> x = np.arange(5)
+ >>> fl = x.flat
+ >>> fl.base is x
+ True
+
+ """))
+
+
+
+add_newdoc('numpy.core', 'flatiter', ('coords',
+ """
+ An N-dimensional tuple of current coordinates.
+
+ Examples
+ --------
+ >>> x = np.arange(6).reshape(2, 3)
+ >>> fl = x.flat
+ >>> fl.coords
+ (0, 0)
+ >>> fl.next()
+ 0
+ >>> fl.coords
+ (0, 1)
+
+ """))
+
+
+
+add_newdoc('numpy.core', 'flatiter', ('index',
+ """
+ Current flat index into the array.
+
+ Examples
+ --------
+ >>> x = np.arange(6).reshape(2, 3)
+ >>> fl = x.flat
+ >>> fl.index
+ 0
+ >>> fl.next()
+ 0
+ >>> fl.index
+ 1
+
+ """))
+
+# flatiter functions
+
+add_newdoc('numpy.core', 'flatiter', ('__array__',
+ """__array__(type=None) Get array from iterator
+
+ """))
+
+
+add_newdoc('numpy.core', 'flatiter', ('copy',
+ """
+ copy()
+
+ Get a copy of the iterator as a 1-D array.
+
+ Examples
+ --------
+ >>> x = np.arange(6).reshape(2, 3)
+ >>> x
+ array([[0, 1, 2],
+ [3, 4, 5]])
+ >>> fl = x.flat
+ >>> fl.copy()
+ array([0, 1, 2, 3, 4, 5])
+
+ """))
+
+
+###############################################################################
+#
+# nditer
+#
+###############################################################################
+
+add_newdoc('numpy.core', 'nditer',
+ """
+ Efficient multi-dimensional iterator object to iterate over arrays.
+ To get started using this object, see the
+ :ref:`introductory guide to array iteration <arrays.nditer>`.
+
+ Parameters
+ ----------
+ op : ndarray or sequence of array_like
+ The array(s) to iterate over.
+ flags : sequence of str, optional
+ Flags to control the behavior of the iterator.
+
+ * "buffered" enables buffering when required.
+ * "c_index" causes a C-order index to be tracked.
+ * "f_index" causes a Fortran-order index to be tracked.
+ * "multi_index" causes a multi-index, or a tuple of indices
+ with one per iteration dimension, to be tracked.
+ * "common_dtype" causes all the operands to be converted to
+ a common data type, with copying or buffering as necessary.
+ * "copy_if_overlap" causes the iterator to determine if read
+ operands have overlap with write operands, and make temporary
+ copies as necessary to avoid overlap. False positives (needless
+ copying) are possible in some cases.
+ * "delay_bufalloc" delays allocation of the buffers until
+ a reset() call is made. Allows "allocate" operands to
+ be initialized before their values are copied into the buffers.
+ * "external_loop" causes the `values` given to be
+ one-dimensional arrays with multiple values instead of
+ zero-dimensional arrays.
+ * "grow_inner" allows the `value` array sizes to be made
+ larger than the buffer size when both "buffered" and
+ "external_loop" is used.
+ * "ranged" allows the iterator to be restricted to a sub-range
+ of the iterindex values.
+ * "refs_ok" enables iteration of reference types, such as
+ object arrays.
+ * "reduce_ok" enables iteration of "readwrite" operands
+ which are broadcasted, also known as reduction operands.
+ * "zerosize_ok" allows `itersize` to be zero.
+ op_flags : list of list of str, optional
+ This is a list of flags for each operand. At minimum, one of
+ "readonly", "readwrite", or "writeonly" must be specified.
+
+ * "readonly" indicates the operand will only be read from.
+ * "readwrite" indicates the operand will be read from and written to.
+ * "writeonly" indicates the operand will only be written to.
+ * "no_broadcast" prevents the operand from being broadcasted.
+ * "contig" forces the operand data to be contiguous.
+ * "aligned" forces the operand data to be aligned.
+ * "nbo" forces the operand data to be in native byte order.
+ * "copy" allows a temporary read-only copy if required.
+ * "updateifcopy" allows a temporary read-write copy if required.
+ * "allocate" causes the array to be allocated if it is None
+ in the `op` parameter.
+ * "no_subtype" prevents an "allocate" operand from using a subtype.
+ * "arraymask" indicates that this operand is the mask to use
+ for selecting elements when writing to operands with the
+ 'writemasked' flag set. The iterator does not enforce this,
+ but when writing from a buffer back to the array, it only
+ copies those elements indicated by this mask.
+ * 'writemasked' indicates that only elements where the chosen
+ 'arraymask' operand is True will be written to.
+ * "overlap_assume_elementwise" can be used to mark operands that are
+ accessed only in the iterator order, to allow less conservative
+ copying when "copy_if_overlap" is present.
+ op_dtypes : dtype or tuple of dtype(s), optional
+ The required data type(s) of the operands. If copying or buffering
+ is enabled, the data will be converted to/from their original types.
+ order : {'C', 'F', 'A', 'K'}, optional
+ Controls the iteration order. 'C' means C order, 'F' means
+ Fortran order, 'A' means 'F' order if all the arrays are Fortran
+ contiguous, 'C' order otherwise, and 'K' means as close to the
+ order the array elements appear in memory as possible. This also
+ affects the element memory order of "allocate" operands, as they
+ are allocated to be compatible with iteration order.
+ Default is 'K'.
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+ Controls what kind of data casting may occur when making a copy
+ or buffering. Setting this to 'unsafe' is not recommended,
+ as it can adversely affect accumulations.
+
+ * 'no' means the data types should not be cast at all.
+ * 'equiv' means only byte-order changes are allowed.
+ * 'safe' means only casts which can preserve values are allowed.
+ * 'same_kind' means only safe casts or casts within a kind,
+ like float64 to float32, are allowed.
+ * 'unsafe' means any data conversions may be done.
+ op_axes : list of list of ints, optional
+ If provided, is a list of ints or None for each operand.
+ The list of axes for an operand is a mapping from the dimensions
+ of the iterator to the dimensions of the operand. A value of
+ -1 can be placed for entries, causing that dimension to be
+ treated as "newaxis".
+ itershape : tuple of ints, optional
+ The desired shape of the iterator. This allows "allocate" operands
+ with a dimension mapped by op_axes not corresponding to a dimension
+ of a different operand to get a value not equal to 1 for that
+ dimension.
+ buffersize : int, optional
+ When buffering is enabled, controls the size of the temporary
+ buffers. Set to 0 for the default value.
+
+ Attributes
+ ----------
+ dtypes : tuple of dtype(s)
+ The data types of the values provided in `value`. This may be
+ different from the operand data types if buffering is enabled.
+ Valid only before the iterator is closed.
+ finished : bool
+ Whether the iteration over the operands is finished or not.
+ has_delayed_bufalloc : bool
+ If True, the iterator was created with the "delay_bufalloc" flag,
+ and no reset() function was called on it yet.
+ has_index : bool
+ If True, the iterator was created with either the "c_index" or
+ the "f_index" flag, and the property `index` can be used to
+ retrieve it.
+ has_multi_index : bool
+ If True, the iterator was created with the "multi_index" flag,
+ and the property `multi_index` can be used to retrieve it.
+ index
+ When the "c_index" or "f_index" flag was used, this property
+ provides access to the index. Raises a ValueError if accessed
+ and `has_index` is False.
+ iterationneedsapi : bool
+ Whether iteration requires access to the Python API, for example
+ if one of the operands is an object array.
+ iterindex : int
+ An index which matches the order of iteration.
+ itersize : int
+ Size of the iterator.
+ itviews
+ Structured view(s) of `operands` in memory, matching the reordered
+ and optimized iterator access pattern. Valid only before the iterator
+ is closed.
+ multi_index
+ When the "multi_index" flag was used, this property
+ provides access to the index. Raises a ValueError if accessed
+ and `has_multi_index` is False.
+ ndim : int
+ The iterator's dimension.
+ nop : int
+ The number of iterator operands.
+ operands : tuple of operand(s)
+ The array(s) to be iterated over. Valid only before the iterator is
+ closed.
+ shape : tuple of ints
+ Shape tuple, the shape of the iterator.
+ value
+ Value of `operands` at current iteration. Normally, this is a
+ tuple of array scalars, but if the flag "external_loop" is used,
+ it is a tuple of one dimensional arrays.
+
+ Notes
+ -----
+ `nditer` supersedes `flatiter`. The iterator implementation behind
+ `nditer` is also exposed by the NumPy C API.
+
+ The Python exposure supplies two iteration interfaces, one which follows
+ the Python iterator protocol, and another which mirrors the C-style
+ do-while pattern. The native Python approach is better in most cases, but
+ if you need the iterator's coordinates or index, use the C-style pattern.
+
+ Examples
+ --------
+ Here is how we might write an ``iter_add`` function, using the
+ Python iterator protocol::
+
+ def iter_add_py(x, y, out=None):
+ addop = np.add
+ it = np.nditer([x, y, out], [],
+ [['readonly'], ['readonly'], ['writeonly','allocate']])
+ with it:
+ for (a, b, c) in it:
+ addop(a, b, out=c)
+ return it.operands[2]
+
+ Here is the same function, but following the C-style pattern::
+
+ def iter_add(x, y, out=None):
+ addop = np.add
+
+ it = np.nditer([x, y, out], [],
+ [['readonly'], ['readonly'], ['writeonly','allocate']])
+ with it:
+ while not it.finished:
+ addop(it[0], it[1], out=it[2])
+ it.iternext()
+
+ return it.operands[2]
+
+ Here is an example outer product function::
+
+ def outer_it(x, y, out=None):
+ mulop = np.multiply
+
+ it = np.nditer([x, y, out], ['external_loop'],
+ [['readonly'], ['readonly'], ['writeonly', 'allocate']],
+ op_axes=[list(range(x.ndim)) + [-1] * y.ndim,
+ [-1] * x.ndim + list(range(y.ndim)),
+ None])
+ with it:
+ for (a, b, c) in it:
+ mulop(a, b, out=c)
+ return it.operands[2]
+
+ >>> a = np.arange(2)+1
+ >>> b = np.arange(3)+1
+ >>> outer_it(a,b)
+ array([[1, 2, 3],
+ [2, 4, 6]])
+
+ Here is an example function which operates like a "lambda" ufunc::
+
+ def luf(lamdaexpr, *args, **kwargs):
+ "luf(lambdaexpr, op1, ..., opn, out=None, order='K', casting='safe', buffersize=0)"
+ nargs = len(args)
+ op = (kwargs.get('out',None),) + args
+ it = np.nditer(op, ['buffered','external_loop'],
+ [['writeonly','allocate','no_broadcast']] +
+ [['readonly','nbo','aligned']]*nargs,
+ order=kwargs.get('order','K'),
+ casting=kwargs.get('casting','safe'),
+ buffersize=kwargs.get('buffersize',0))
+ while not it.finished:
+ it[0] = lamdaexpr(*it[1:])
+ it.iternext()
+ return it.operands[0]
+
+ >>> a = np.arange(5)
+ >>> b = np.ones(5)
+ >>> luf(lambda i,j:i*i + j/2, a, b)
+ array([ 0.5, 1.5, 4.5, 9.5, 16.5])
+
+ If operand flags `"writeonly"` or `"readwrite"` are used the operands may
+ be views into the original data with the `WRITEBACKIFCOPY` flag. In this case
+ nditer must be used as a context manager or the nditer.close
+ method must be called before using the result. The temporary
+ data will be written back to the original data when the `__exit__`
+ function is called but not before:
+
+ >>> a = np.arange(6, dtype='i4')[::-2]
+ >>> with np.nditer(a, [],
+ ... [['writeonly', 'updateifcopy']],
+ ... casting='unsafe',
+ ... op_dtypes=[np.dtype('f4')]) as i:
+ ... x = i.operands[0]
+ ... x[:] = [-1, -2, -3]
+ ... # a still unchanged here
+ >>> a, x
+ (array([-1, -2, -3], dtype=int32), array([-1., -2., -3.], dtype=float32))
+
+ It is important to note that once the iterator is exited, dangling
+ references (like `x` in the example) may or may not share data with
+ the original data `a`. If writeback semantics were active, i.e. if
+ `x.base.flags.writebackifcopy` is `True`, then exiting the iterator
+ will sever the connection between `x` and `a`, writing to `x` will
+ no longer write to `a`. If writeback semantics are not active, then
+ `x.data` will still point at some part of `a.data`, and writing to
+ one will affect the other.
+
+ """)
+
+# nditer methods
+
+add_newdoc('numpy.core', 'nditer', ('copy',
+ """
+ copy()
+
+ Get a copy of the iterator in its current state.
+
+ Examples
+ --------
+ >>> x = np.arange(10)
+ >>> y = x + 1
+ >>> it = np.nditer([x, y])
+ >>> it.next()
+ (array(0), array(1))
+ >>> it2 = it.copy()
+ >>> it2.next()
+ (array(1), array(2))
+
+ """))
+
+add_newdoc('numpy.core', 'nditer', ('operands',
+ """
+ operands[`Slice`]
+
+ The array(s) to be iterated over. Valid only before the iterator is closed.
+ """))
+
+add_newdoc('numpy.core', 'nditer', ('debug_print',
+ """
+ debug_print()
+
+ Print the current state of the `nditer` instance and debug info to stdout.
+
+ """))
+
+add_newdoc('numpy.core', 'nditer', ('enable_external_loop',
+ """
+ enable_external_loop()
+
+ When the "external_loop" was not used during construction, but
+ is desired, this modifies the iterator to behave as if the flag
+ was specified.
+
+ """))
+
+add_newdoc('numpy.core', 'nditer', ('iternext',
+ """
+ iternext()
+
+ Check whether iterations are left, and perform a single internal iteration
+ without returning the result. Used in the C-style do-while pattern.
+ For an example, see `nditer`.
+
+ Returns
+ -------
+ iternext : bool
+ Whether or not there are iterations left.
+
+ """))
+
+add_newdoc('numpy.core', 'nditer', ('remove_axis',
+ """
+ remove_axis(i)
+
+ Removes axis `i` from the iterator. Requires that the flag "multi_index"
+ be enabled.
+
+ """))
+
+add_newdoc('numpy.core', 'nditer', ('remove_multi_index',
+ """
+ remove_multi_index()
+
+ When the "multi_index" flag was specified, this removes it, allowing
+ the internal iteration structure to be optimized further.
+
+ """))
+
+add_newdoc('numpy.core', 'nditer', ('reset',
+ """
+ reset()
+
+ Reset the iterator to its initial state.
+
+ """))
+
+add_newdoc('numpy.core', 'nested_iters',
+ """
+ Create nditers for use in nested loops
+
+ Create a tuple of `nditer` objects which iterate in nested loops over
+ different axes of the op argument. The first iterator is used in the
+ outermost loop, the last in the innermost loop. Advancing one will change
+ the subsequent iterators to point at its new element.
+
+ Parameters
+ ----------
+ op : ndarray or sequence of array_like
+ The array(s) to iterate over.
+
+ axes : list of list of int
+ Each item is used as an "op_axes" argument to an nditer
+
+ flags, op_flags, op_dtypes, order, casting, buffersize (optional)
+ See `nditer` parameters of the same name
+
+ Returns
+ -------
+ iters : tuple of nditer
+ An nditer for each item in `axes`, outermost first
+
+ See Also
+ --------
+ nditer
+
+ Examples
+ --------
+
+ Basic usage. Note how y is the "flattened" version of
+ [a[:, 0, :], a[:, 1, :], a[:, 2, :]] since we specified
+ the first iter's axes as [1]
+
+ >>> a = np.arange(12).reshape(2, 3, 2)
+ >>> i, j = np.nested_iters(a, [[1], [0, 2]], flags=["multi_index"])
+ >>> for x in i:
+ ... print(i.multi_index)
+ ... for y in j:
+ ... print('', j.multi_index, y)
+
+ (0,)
+ (0, 0) 0
+ (0, 1) 1
+ (1, 0) 6
+ (1, 1) 7
+ (1,)
+ (0, 0) 2
+ (0, 1) 3
+ (1, 0) 8
+ (1, 1) 9
+ (2,)
+ (0, 0) 4
+ (0, 1) 5
+ (1, 0) 10
+ (1, 1) 11
+
+ """)
+
+add_newdoc('numpy.core', 'nditer', ('close',
+ """
+ close()
+
+ Resolve all writeback semantics in writeable operands.
+
+ See Also
+ --------
+
+ :ref:`nditer-context-manager`
+
+ """))
+
+
+###############################################################################
+#
+# broadcast
+#
+###############################################################################
+
+add_newdoc('numpy.core', 'broadcast',
+ """
+ Produce an object that mimics broadcasting.
+
+ Parameters
+ ----------
+ in1, in2, ... : array_like
+ Input parameters.
+
+ Returns
+ -------
+ b : broadcast object
+ Broadcast the input parameters against one another, and
+ return an object that encapsulates the result.
+ Amongst others, it has ``shape`` and ``nd`` properties, and
+ may be used as an iterator.
+
+ See Also
+ --------
+ broadcast_arrays
+ broadcast_to
+
+ Examples
+ --------
+
+ Manually adding two vectors, using broadcasting:
+
+ >>> x = np.array([[1], [2], [3]])
+ >>> y = np.array([4, 5, 6])
+ >>> b = np.broadcast(x, y)
+
+ >>> out = np.empty(b.shape)
+ >>> out.flat = [u+v for (u,v) in b]
+ >>> out
+ array([[ 5., 6., 7.],
+ [ 6., 7., 8.],
+ [ 7., 8., 9.]])
+
+ Compare against built-in broadcasting:
+
+ >>> x + y
+ array([[5, 6, 7],
+ [6, 7, 8],
+ [7, 8, 9]])
+
+ """)
+
+# attributes
+
+add_newdoc('numpy.core', 'broadcast', ('index',
+ """
+ current index in broadcasted result
+
+ Examples
+ --------
+ >>> x = np.array([[1], [2], [3]])
+ >>> y = np.array([4, 5, 6])
+ >>> b = np.broadcast(x, y)
+ >>> b.index
+ 0
+ >>> b.next(), b.next(), b.next()
+ ((1, 4), (1, 5), (1, 6))
+ >>> b.index
+ 3
+
+ """))
+
+add_newdoc('numpy.core', 'broadcast', ('iters',
+ """
+ tuple of iterators along ``self``'s "components."
+
+ Returns a tuple of `numpy.flatiter` objects, one for each "component"
+ of ``self``.
+
+ See Also
+ --------
+ numpy.flatiter
+
+ Examples
+ --------
+ >>> x = np.array([1, 2, 3])
+ >>> y = np.array([[4], [5], [6]])
+ >>> b = np.broadcast(x, y)
+ >>> row, col = b.iters
+ >>> row.next(), col.next()
+ (1, 4)
+
+ """))
+
+add_newdoc('numpy.core', 'broadcast', ('ndim',
+ """
+ Number of dimensions of broadcasted result. Alias for `nd`.
+
+ .. versionadded:: 1.12.0
+
+ Examples
+ --------
+ >>> x = np.array([1, 2, 3])
+ >>> y = np.array([[4], [5], [6]])
+ >>> b = np.broadcast(x, y)
+ >>> b.ndim
+ 2
+
+ """))
+
+add_newdoc('numpy.core', 'broadcast', ('nd',
+ """
+ Number of dimensions of broadcasted result. For code intended for NumPy
+ 1.12.0 and later the more consistent `ndim` is preferred.
+
+ Examples
+ --------
+ >>> x = np.array([1, 2, 3])
+ >>> y = np.array([[4], [5], [6]])
+ >>> b = np.broadcast(x, y)
+ >>> b.nd
+ 2
+
+ """))
+
+add_newdoc('numpy.core', 'broadcast', ('numiter',
+ """
+ Number of iterators possessed by the broadcasted result.
+
+ Examples
+ --------
+ >>> x = np.array([1, 2, 3])
+ >>> y = np.array([[4], [5], [6]])
+ >>> b = np.broadcast(x, y)
+ >>> b.numiter
+ 2
+
+ """))
+
+add_newdoc('numpy.core', 'broadcast', ('shape',
+ """
+ Shape of broadcasted result.
+
+ Examples
+ --------
+ >>> x = np.array([1, 2, 3])
+ >>> y = np.array([[4], [5], [6]])
+ >>> b = np.broadcast(x, y)
+ >>> b.shape
+ (3, 3)
+
+ """))
+
+add_newdoc('numpy.core', 'broadcast', ('size',
+ """
+ Total size of broadcasted result.
+
+ Examples
+ --------
+ >>> x = np.array([1, 2, 3])
+ >>> y = np.array([[4], [5], [6]])
+ >>> b = np.broadcast(x, y)
+ >>> b.size
+ 9
+
+ """))
+
+add_newdoc('numpy.core', 'broadcast', ('reset',
+ """
+ reset()
+
+ Reset the broadcasted result's iterator(s).
+
+ Parameters
+ ----------
+ None
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ >>> x = np.array([1, 2, 3])
+ >>> y = np.array([[4], [5], [6]])
+ >>> b = np.broadcast(x, y)
+ >>> b.index
+ 0
+ >>> b.next(), b.next(), b.next()
+ ((1, 4), (2, 4), (3, 4))
+ >>> b.index
+ 3
+ >>> b.reset()
+ >>> b.index
+ 0
+
+ """))
+
+###############################################################################
+#
+# numpy functions
+#
+###############################################################################
+
+add_newdoc('numpy.core.multiarray', 'array',
+ """
+ array(object, dtype=None, copy=True, order='K', subok=False, ndmin=0)
+
+ Create an array.
+
+ Parameters
+ ----------
+ object : array_like
+ An array, any object exposing the array interface, an object whose
+ __array__ method returns an array, or any (nested) sequence.
+ dtype : data-type, optional
+ The desired data-type for the array. If not given, then the type will
+ be determined as the minimum type required to hold the objects in the
+ sequence. This argument can only be used to 'upcast' the array. For
+ downcasting, use the .astype(t) method.
+ copy : bool, optional
+ If true (default), then the object is copied. Otherwise, a copy will
+ only be made if __array__ returns a copy, if obj is a nested sequence,
+ or if a copy is needed to satisfy any of the other requirements
+ (`dtype`, `order`, etc.).
+ order : {'K', 'A', 'C', 'F'}, optional
+ Specify the memory layout of the array. If object is not an array, the
+ newly created array will be in C order (row major) unless 'F' is
+ specified, in which case it will be in Fortran order (column major).
+ If object is an array the following holds.
+
+ ===== ========= ===================================================
+ order no copy copy=True
+ ===== ========= ===================================================
+ 'K' unchanged F & C order preserved, otherwise most similar order
+ 'A' unchanged F order if input is F and not C, otherwise C order
+ 'C' C order C order
+ 'F' F order F order
+ ===== ========= ===================================================
+
+ When ``copy=False`` and a copy is made for other reasons, the result is
+ the same as if ``copy=True``, with some exceptions for `A`, see the
+ Notes section. The default order is 'K'.
+ subok : bool, optional
+ If True, then sub-classes will be passed-through, otherwise
+ the returned array will be forced to be a base-class array (default).
+ ndmin : int, optional
+ Specifies the minimum number of dimensions that the resulting
+ array should have. Ones will be pre-pended to the shape as
+ needed to meet this requirement.
+
+ Returns
+ -------
+ out : ndarray
+ An array object satisfying the specified requirements.
+
+ See Also
+ --------
+ empty_like : Return an empty array with shape and type of input.
+ ones_like : Return an array of ones with shape and type of input.
+ zeros_like : Return an array of zeros with shape and type of input.
+ full_like : Return a new array with shape of input filled with value.
+ empty : Return a new uninitialized array.
+ ones : Return a new array setting values to one.
+ zeros : Return a new array setting values to zero.
+ full : Return a new array of given shape filled with value.
+
+
+ Notes
+ -----
+ When order is 'A' and `object` is an array in neither 'C' nor 'F' order,
+ and a copy is forced by a change in dtype, then the order of the result is
+ not necessarily 'C' as expected. This is likely a bug.
+
+ Examples
+ --------
+ >>> np.array([1, 2, 3])
+ array([1, 2, 3])
+
+ Upcasting:
+
+ >>> np.array([1, 2, 3.0])
+ array([ 1., 2., 3.])
+
+ More than one dimension:
+
+ >>> np.array([[1, 2], [3, 4]])
+ array([[1, 2],
+ [3, 4]])
+
+ Minimum dimensions 2:
+
+ >>> np.array([1, 2, 3], ndmin=2)
+ array([[1, 2, 3]])
+
+ Type provided:
+
+ >>> np.array([1, 2, 3], dtype=complex)
+ array([ 1.+0.j, 2.+0.j, 3.+0.j])
+
+ Data-type consisting of more than one element:
+
+ >>> x = np.array([(1,2),(3,4)],dtype=[('a','<i4'),('b','<i4')])
+ >>> x['a']
+ array([1, 3])
+
+ Creating an array from sub-classes:
+
+ >>> np.array(np.mat('1 2; 3 4'))
+ array([[1, 2],
+ [3, 4]])
+
+ >>> np.array(np.mat('1 2; 3 4'), subok=True)
+ matrix([[1, 2],
+ [3, 4]])
+
+ """)
+
+add_newdoc('numpy.core.multiarray', 'empty',
+ """
+ empty(shape, dtype=float, order='C')
+
+ Return a new array of given shape and type, without initializing entries.
+
+ Parameters
+ ----------
+ shape : int or tuple of int
+ Shape of the empty array, e.g., ``(2, 3)`` or ``2``.
+ dtype : data-type, optional
+ Desired output data-type for the array, e.g., `numpy.int8`. Default is
+ `numpy.float64`.
+ order : {'C', 'F'}, optional, default: 'C'
+ Whether to store multi-dimensional data in row-major
+ (C-style) or column-major (Fortran-style) order in
+ memory.
+
+ Returns
+ -------
+ out : ndarray
+ Array of uninitialized (arbitrary) data of the given shape, dtype, and
+ order. Object arrays will be initialized to None.
+
+ See Also
+ --------
+ empty_like : Return an empty array with shape and type of input.
+ ones : Return a new array setting values to one.
+ zeros : Return a new array setting values to zero.
+ full : Return a new array of given shape filled with value.
+
+
+ Notes
+ -----
+ `empty`, unlike `zeros`, does not set the array values to zero,
+ and may therefore be marginally faster. On the other hand, it requires
+ the user to manually set all the values in the array, and should be
+ used with caution.
+
+ Examples
+ --------
+ >>> np.empty([2, 2])
+ array([[ -9.74499359e+001, 6.69583040e-309],
+ [ 2.13182611e-314, 3.06959433e-309]]) #random
+
+ >>> np.empty([2, 2], dtype=int)
+ array([[-1073741821, -1067949133],
+ [ 496041986, 19249760]]) #random
+
+ """)
+
+add_newdoc('numpy.core.multiarray', 'scalar',
+ """
+ scalar(dtype, obj)
+
+ Return a new scalar array of the given type initialized with obj.
+
+ This function is meant mainly for pickle support. `dtype` must be a
+ valid data-type descriptor. If `dtype` corresponds to an object
+ descriptor, then `obj` can be any object, otherwise `obj` must be a
+ string. If `obj` is not given, it will be interpreted as None for object
+ type and as zeros for all other types.
+
+ """)
+
+add_newdoc('numpy.core.multiarray', 'zeros',
+ """
+ zeros(shape, dtype=float, order='C')
+
+ Return a new array of given shape and type, filled with zeros.
+
+ Parameters
+ ----------
+ shape : int or tuple of ints
+ Shape of the new array, e.g., ``(2, 3)`` or ``2``.
+ dtype : data-type, optional
+ The desired data-type for the array, e.g., `numpy.int8`. Default is
+ `numpy.float64`.
+ order : {'C', 'F'}, optional, default: 'C'
+ Whether to store multi-dimensional data in row-major
+ (C-style) or column-major (Fortran-style) order in
+ memory.
+
+ Returns
+ -------
+ out : ndarray
+ Array of zeros with the given shape, dtype, and order.
+
+ See Also
+ --------
+ zeros_like : Return an array of zeros with shape and type of input.
+ empty : Return a new uninitialized array.
+ ones : Return a new array setting values to one.
+ full : Return a new array of given shape filled with value.
+
+ Examples
+ --------
+ >>> np.zeros(5)
+ array([ 0., 0., 0., 0., 0.])
+
+ >>> np.zeros((5,), dtype=int)
+ array([0, 0, 0, 0, 0])
+
+ >>> np.zeros((2, 1))
+ array([[ 0.],
+ [ 0.]])
+
+ >>> s = (2,2)
+ >>> np.zeros(s)
+ array([[ 0., 0.],
+ [ 0., 0.]])
+
+ >>> np.zeros((2,), dtype=[('x', 'i4'), ('y', 'i4')]) # custom dtype
+ array([(0, 0), (0, 0)],
+ dtype=[('x', '<i4'), ('y', '<i4')])
+
+ """)
+
+add_newdoc('numpy.core.multiarray', 'fromstring',
+ """
+ fromstring(string, dtype=float, count=-1, sep='')
+
+ A new 1-D array initialized from text data in a string.
+
+ Examples
+ --------
+ >>> np.fromstring('1 2', dtype=int, sep=' ')
+ array([1, 2])
+ >>> np.fromstring('1, 2', dtype=int, sep=',')
+ array([1, 2])
+
+ """)
+
+add_newdoc('numpy.core.multiarray', 'compare_chararrays',
+ """
+ compare_chararrays(a, b, cmp_op, rstrip)
+
+ Performs element-wise comparison of two string arrays using the
+ comparison operator specified by `cmp_op`.
+
+ Parameters
+ ----------
+ a, b : array_like
+ Arrays to be compared.
+ cmp_op : {"<", "<=", "==", ">=", ">", "!="}
+ Type of comparison.
+ rstrip : Boolean
+ If True, the spaces at the end of Strings are removed before the comparison.
+
+ Returns
+ -------
+ out : ndarray
+ The output array of type Boolean with the same shape as a and b.
+
+ Raises
+ ------
+ ValueError
+ If `cmp_op` is not valid.
+ TypeError
+ If at least one of `a` or `b` is a non-string array
+
+ Examples
+ --------
+ >>> a = np.array(["a", "b", "cde"])
+ >>> b = np.array(["a", "a", "dec"])
+ >>> np.compare_chararrays(a, b, ">", True)
+ array([False, True, False])
+
+ """)
+
+add_newdoc('numpy.core.multiarray', 'fromiter',
+ """
+ fromiter(iterable, dtype, count=-1)
+
+ Create a new 1-dimensional array from an iterable object.
+
+ Parameters
+ ----------
+ iterable : iterable object
+ An iterable object providing data for the array.
+ dtype : data-type
+ The data-type of the returned array.
+ count : int, optional
+ The number of items to read from *iterable*. The default is -1,
+ which means all data is read.
+
+ Returns
+ -------
+ out : ndarray
+ The output array.
+
+ Notes
+ -----
+ Specify `count` to improve performance. It allows ``fromiter`` to
+ pre-allocate the output array, instead of resizing it on demand.
+
+ Examples
+ --------
+ >>> iterable = (x*x for x in range(5))
+ >>> np.fromiter(iterable, float)
+ array([ 0., 1., 4., 9., 16.])
+
+ """)
+
+add_newdoc('numpy.core.multiarray', 'fromfile',
+ """
+ fromfile(file, dtype=float, count=-1, sep='')
+
+ Construct an array from data in a text or binary file.
+
+ A highly efficient way of reading binary data with a known data-type,
+ as well as parsing simply formatted text files. Data written using the
+ `tofile` method can be read using this function.
+
+ Parameters
+ ----------
+ file : file or str
+ Open file object or filename.
+ dtype : data-type
+ Data type of the returned array.
+ For binary files, it is used to determine the size and byte-order
+ of the items in the file.
+ count : int
+ Number of items to read. ``-1`` means all items (i.e., the complete
+ file).
+ sep : str
+ Separator between items if file is a text file.
+ Empty ("") separator means the file should be treated as binary.
+ Spaces (" ") in the separator match zero or more whitespace characters.
+ A separator consisting only of spaces must match at least one
+ whitespace.
+
+ See also
+ --------
+ load, save
+ ndarray.tofile
+ loadtxt : More flexible way of loading data from a text file.
+
+ Notes
+ -----
+ Do not rely on the combination of `tofile` and `fromfile` for
+ data storage, as the binary files generated are not platform
+ independent. In particular, no byte-order or data-type information is
+ saved. Data can be stored in the platform independent ``.npy`` format
+ using `save` and `load` instead.
+
+ Examples
+ --------
+ Construct an ndarray:
+
+ >>> dt = np.dtype([('time', [('min', int), ('sec', int)]),
+ ... ('temp', float)])
+ >>> x = np.zeros((1,), dtype=dt)
+ >>> x['time']['min'] = 10; x['temp'] = 98.25
+ >>> x
+ array([((10, 0), 98.25)],
+ dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
+
+ Save the raw data to disk:
+
+ >>> import os
+ >>> fname = os.tmpnam()
+ >>> x.tofile(fname)
+
+ Read the raw data from disk:
+
+ >>> np.fromfile(fname, dtype=dt)
+ array([((10, 0), 98.25)],
+ dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
+
+ The recommended way to store and load data:
+
+ >>> np.save(fname, x)
+ >>> np.load(fname + '.npy')
+ array([((10, 0), 98.25)],
+ dtype=[('time', [('min', '>> dt = np.dtype(int)
+ >>> dt = dt.newbyteorder('>')
+ >>> np.frombuffer(buf, dtype=dt)
+
+ The data of the resulting array will not be byteswapped, but will be
+ interpreted correctly.
+
+ Examples
+ --------
+ >>> s = 'hello world'
+ >>> np.frombuffer(s, dtype='S1', count=5, offset=6)
+ array(['w', 'o', 'r', 'l', 'd'],
+ dtype='|S1')
+
+ >>> np.frombuffer(b'\\x01\\x02', dtype=np.uint8)
+ array([1, 2], dtype=uint8)
+ >>> np.frombuffer(b'\\x01\\x02\\x03\\x04\\x05', dtype=np.uint8, count=3)
+ array([1, 2, 3], dtype=uint8)
+
+ """)
+
+add_newdoc('numpy.core', 'fastCopyAndTranspose',
+ """_fastCopyAndTranspose(a)""")
+
+add_newdoc('numpy.core.multiarray', 'correlate',
+ """cross_correlate(a,v, mode=0)""")
+
+add_newdoc('numpy.core.multiarray', 'arange',
+ """
+ arange([start,] stop[, step,], dtype=None)
+
+ Return evenly spaced values within a given interval.
+
+ Values are generated within the half-open interval ``[start, stop)``
+ (in other words, the interval including `start` but excluding `stop`).
+ For integer arguments the function is equivalent to the Python built-in
+ `range` function, but returns an ndarray rather than a list.
+
+ When using a non-integer step, such as 0.1, the results will often not
+ be consistent. It is better to use `numpy.linspace` for these cases.
+
+ Parameters
+ ----------
+ start : number, optional
+ Start of interval. The interval includes this value. The default
+ start value is 0.
+ stop : number
+ End of interval. The interval does not include this value, except
+ in some cases where `step` is not an integer and floating point
+ round-off affects the length of `out`.
+ step : number, optional
+ Spacing between values. For any output `out`, this is the distance
+ between two adjacent values, ``out[i+1] - out[i]``. The default
+ step size is 1. If `step` is specified as a positional argument,
+ `start` must also be given.
+ dtype : dtype
+ The type of the output array. If `dtype` is not given, infer the data
+ type from the other input arguments.
+
+ Returns
+ -------
+ arange : ndarray
+ Array of evenly spaced values.
+
+ For floating point arguments, the length of the result is
+ ``ceil((stop - start)/step)``. Because of floating point overflow,
+ this rule may result in the last element of `out` being greater
+ than `stop`.
+
+ See Also
+ --------
+ linspace : Evenly spaced numbers with careful handling of endpoints.
+ ogrid: Arrays of evenly spaced numbers in N-dimensions.
+ mgrid: Grid-shaped arrays of evenly spaced numbers in N-dimensions.
+
+ Examples
+ --------
+ >>> np.arange(3)
+ array([0, 1, 2])
+ >>> np.arange(3.0)
+ array([ 0., 1., 2.])
+ >>> np.arange(3,7)
+ array([3, 4, 5, 6])
+ >>> np.arange(3,7,2)
+ array([3, 5])
+
+ """)
+
+add_newdoc('numpy.core.multiarray', '_get_ndarray_c_version',
+ """_get_ndarray_c_version()
+
+ Return the compile time NDARRAY_VERSION number.
+
+ """)
+
+add_newdoc('numpy.core.multiarray', '_reconstruct',
+ """_reconstruct(subtype, shape, dtype)
+
+ Construct an empty array. Used by Pickles.
+
+ """)
+
+
+add_newdoc('numpy.core.multiarray', 'set_string_function',
+ """
+ set_string_function(f, repr=1)
+
+ Internal method to set a function to be used when pretty printing arrays.
+
+ """)
+
+add_newdoc('numpy.core.multiarray', 'set_numeric_ops',
+ """
+ set_numeric_ops(op1=func1, op2=func2, ...)
+
+ Set numerical operators for array objects.
+
+ .. deprecated:: 1.16
+
+ For the general case, use :c:func:`PyUFunc_ReplaceLoopBySignature`.
+ For ndarray subclasses, define the ``__array_ufunc__`` method and
+ override the relevant ufunc.
+
+ Parameters
+ ----------
+ op1, op2, ... : callable
+ Each ``op = func`` pair describes an operator to be replaced.
+ For example, ``add = lambda x, y: np.add(x, y) % 5`` would replace
+ addition by modulus 5 addition.
+
+ Returns
+ -------
+ saved_ops : list of callables
+ A list of all operators, stored before making replacements.
+
+ Notes
+ -----
+ .. WARNING::
+ Use with care! Incorrect usage may lead to memory errors.
+
+ A function replacing an operator cannot make use of that operator.
+ For example, when replacing add, you may not use ``+``. Instead,
+ directly call ufuncs.
+
+ Examples
+ --------
+ >>> def add_mod5(x, y):
+ ... return np.add(x, y) % 5
+ ...
+ >>> old_funcs = np.set_numeric_ops(add=add_mod5)
+
+ >>> x = np.arange(12).reshape((3, 4))
+ >>> x + x
+ array([[0, 2, 4, 1],
+ [3, 0, 2, 4],
+ [1, 3, 0, 2]])
+
+ >>> ignore = np.set_numeric_ops(**old_funcs) # restore operators
+
+ """)
+
+add_newdoc('numpy.core.multiarray', 'promote_types',
+ """
+ promote_types(type1, type2)
+
+ Returns the data type with the smallest size and smallest scalar
+ kind to which both ``type1`` and ``type2`` may be safely cast.
+ The returned data type is always in native byte order.
+
+ This function is symmetric, but rarely associative.
+
+ Parameters
+ ----------
+ type1 : dtype or dtype specifier
+ First data type.
+ type2 : dtype or dtype specifier
+ Second data type.
+
+ Returns
+ -------
+ out : dtype
+ The promoted data type.
+
+ Notes
+ -----
+ .. versionadded:: 1.6.0
+
+ Starting in NumPy 1.9, the `promote_types` function returns a valid string
+ length when given an integer or float dtype as one argument and a string
+ dtype as another argument. Previously it always returned the input string
+ dtype, even if it wasn't long enough to store the max integer/float value
+ converted to a string.
+
+ See Also
+ --------
+ result_type, dtype, can_cast
+
+ Examples
+ --------
+ >>> np.promote_types('f4', 'f8')
+ dtype('float64')
+
+ >>> np.promote_types('i8', 'f4')
+ dtype('float64')
+
+ >>> np.promote_types('>i8', '<c8')
+ dtype('complex128')
+
+ >>> np.promote_types('i4', 'S8')
+ dtype('S11')
+
+ An example of a non-associative case:
+
+ >>> p = np.promote_types
+ >>> p('S', p('i1', 'u1'))
+ dtype('S6')
+ >>> p(p('S', 'i1'), 'u1')
+ dtype('S4')
+
+ """)
+
+add_newdoc('numpy.core.multiarray', 'newbuffer',
+ """
+ newbuffer(size)
+
+ Return a new uninitialized buffer object.
+
+ Parameters
+ ----------
+ size : int
+ Size in bytes of returned buffer object.
+
+ Returns
+ -------
+ newbuffer : buffer object
+ Returned, uninitialized buffer object of `size` bytes.
+
+ """)
+
+add_newdoc('numpy.core.multiarray', 'getbuffer',
+ """
+ getbuffer(obj [,offset[, size]])
+
+ Create a buffer object from the given object referencing a slice of
+ length size starting at offset.
+
+ Default is the entire buffer. A read-write buffer is attempted followed
+ by a read-only buffer.
+
+ Parameters
+ ----------
+ obj : object
+
+ offset : int, optional
+
+ size : int, optional
+
+ Returns
+ -------
+ buffer_obj : buffer
+
+ Examples
+ --------
+ >>> buf = np.getbuffer(np.ones(5), 1, 3)
+ >>> len(buf)
+ 3
+ >>> buf[0]
+ '\\x00'
+ >>> buf
+ <read-write buffer for ..., size 3, offset 0 at ...>
+
+ """)
+
+add_newdoc('numpy.core.multiarray', 'c_einsum',
+ """
+ c_einsum(subscripts, *operands, out=None, dtype=None, order='K',
+ casting='safe')
+
+ *This documentation shadows that of the native python implementation of the `einsum` function,
+ except all references and examples related to the `optimize` argument (v 0.12.0) have been removed.*
+
+ Evaluates the Einstein summation convention on the operands.
+
+ Using the Einstein summation convention, many common multi-dimensional,
+ linear algebraic array operations can be represented in a simple fashion.
+ In *implicit* mode `einsum` computes these values.
+
+ In *explicit* mode, `einsum` provides further flexibility to compute
+ other array operations that might not be considered classical Einstein
+ summation operations, by disabling, or forcing summation over specified
+ subscript labels.
+
+ See the notes and examples for clarification.
+
+ Parameters
+ ----------
+ subscripts : str
+ Specifies the subscripts for summation as comma separated list of
+ subscript labels. An implicit (classical Einstein summation)
+ calculation is performed unless the explicit indicator '->' is
+ included as well as subscript labels of the precise output form.
+ operands : list of array_like
+ These are the arrays for the operation.
+ out : ndarray, optional
+ If provided, the calculation is done into this array.
+ dtype : {data-type, None}, optional
+ If provided, forces the calculation to use the data type specified.
+ Note that you may have to also give a more liberal `casting`
+ parameter to allow the conversions. Default is None.
+ order : {'C', 'F', 'A', 'K'}, optional
+ Controls the memory layout of the output. 'C' means it should
+ be C contiguous. 'F' means it should be Fortran contiguous,
+ 'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
+ 'K' means it should be as close to the layout of the inputs as
+ is possible, including arbitrarily permuted axes.
+ Default is 'K'.
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+ Controls what kind of data casting may occur. Setting this to
+ 'unsafe' is not recommended, as it can adversely affect accumulations.
+
+ * 'no' means the data types should not be cast at all.
+ * 'equiv' means only byte-order changes are allowed.
+ * 'safe' means only casts which can preserve values are allowed.
+ * 'same_kind' means only safe casts or casts within a kind,
+ like float64 to float32, are allowed.
+ * 'unsafe' means any data conversions may be done.
+
+ Default is 'safe'.
+ optimize : {False, True, 'greedy', 'optimal'}, optional
+ Controls if intermediate optimization should occur. No optimization
+ will occur if False; if True, the 'greedy' algorithm is used.
+ Also accepts an explicit contraction list from the ``np.einsum_path``
+ function. See ``np.einsum_path`` for more details. Defaults to False.
+
+ Returns
+ -------
+ output : ndarray
+ The calculation based on the Einstein summation convention.
+
+ See Also
+ --------
+ einsum_path, dot, inner, outer, tensordot, linalg.multi_dot
+
+ Notes
+ -----
+ .. versionadded:: 1.6.0
+
+ The Einstein summation convention can be used to compute
+ many multi-dimensional, linear algebraic array operations. `einsum`
+ provides a succinct way of representing these.
+
+ A non-exhaustive list of these operations,
+ which can be computed by `einsum`, is shown below along with examples:
+
+ * Trace of an array, :py:func:`numpy.trace`.
+ * Return a diagonal, :py:func:`numpy.diag`.
+ * Array axis summations, :py:func:`numpy.sum`.
+ * Transpositions and permutations, :py:func:`numpy.transpose`.
+ * Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`.
+ * Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`.
+ * Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`.
+ * Tensor contractions, :py:func:`numpy.tensordot`.
+ * Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`.
+
+ The subscripts string is a comma-separated list of subscript labels,
+ where each label refers to a dimension of the corresponding operand.
+ Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``
+ is equivalent to :py:func:`np.inner(a,b) <numpy.inner>`. If a label
+ appears only once, it is not summed, so ``np.einsum('i', a)`` produces a
+ view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)``
+ describes traditional matrix multiplication and is equivalent to
+ :py:func:`np.matmul(a,b) <numpy.matmul>`. Repeated subscript labels in one
+ operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent
+ to :py:func:`np.trace(a) <numpy.trace>`.
+
+ In *implicit mode*, the chosen subscripts are important
+ since the axes of the output are reordered alphabetically. This
+ means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
+ ``np.einsum('ji', a)`` takes its transpose. Additionally,
+ ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while,
+ ``np.einsum('ij,jh', a, b)`` returns the transpose of the
+ multiplication since subscript 'h' precedes subscript 'i'.
+
+ In *explicit mode* the output can be directly controlled by
+ specifying output subscript labels. This requires the
+ identifier '->' as well as the list of output subscript labels.
+ This feature increases the flexibility of the function since
+ summing can be disabled or forced when required. The call
+ ``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <numpy.sum>`,
+ and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <numpy.diag>`.
+ The difference is that `einsum` does not allow broadcasting by default.
+ Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the
+ order of the output subscript labels and therefore returns matrix
+ multiplication, unlike the example above in implicit mode.
+
+ To enable and control broadcasting, use an ellipsis. Default
+ NumPy-style broadcasting is done by adding an ellipsis
+ to the left of each term, like ``np.einsum('...ii->...i', a)``.
+ To take the trace along the first and last axes,
+ you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
+ product with the left-most indices instead of rightmost, one can do
+ ``np.einsum('ij...,jk...->ik...', a, b)``.
+
+ When there is only one operand, no axes are summed, and no output
+ parameter is provided, a view into the operand is returned instead
+ of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
+ produces a view (changed in version 1.10.0).
+
+ `einsum` also provides an alternative way to provide the subscripts
+ and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``.
+ If the output shape is not provided in this format `einsum` will be
+ calculated in implicit mode, otherwise it will be performed explicitly.
+ The examples below have corresponding `einsum` calls with the two
+ parameter methods.
+
+ .. versionadded:: 1.10.0
+
+ Views returned from einsum are now writeable whenever the input array
+ is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now
+ have the same effect as :py:func:`np.swapaxes(a, 0, 2) <numpy.swapaxes>`
+ and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
+ of a 2D array.
+
+ Examples
+ --------
+ >>> a = np.arange(25).reshape(5,5)
+ >>> b = np.arange(5)
+ >>> c = np.arange(6).reshape(2,3)
+
+ Trace of a matrix:
+
+ >>> np.einsum('ii', a)
+ 60
+ >>> np.einsum(a, [0,0])
+ 60
+ >>> np.trace(a)
+ 60
+
+ Extract the diagonal (requires explicit form):
+
+ >>> np.einsum('ii->i', a)
+ array([ 0, 6, 12, 18, 24])
+ >>> np.einsum(a, [0,0], [0])
+ array([ 0, 6, 12, 18, 24])
+ >>> np.diag(a)
+ array([ 0, 6, 12, 18, 24])
+
+ Sum over an axis (requires explicit form):
+
+ >>> np.einsum('ij->i', a)
+ array([ 10, 35, 60, 85, 110])
+ >>> np.einsum(a, [0,1], [0])
+ array([ 10, 35, 60, 85, 110])
+ >>> np.sum(a, axis=1)
+ array([ 10, 35, 60, 85, 110])
+
+ For higher dimensional arrays summing a single axis can be done with ellipsis:
+
+ >>> np.einsum('...j->...', a)
+ array([ 10, 35, 60, 85, 110])
+ >>> np.einsum(a, [Ellipsis,1], [Ellipsis])
+ array([ 10, 35, 60, 85, 110])
+
+ Compute a matrix transpose, or reorder any number of axes:
+
+ >>> np.einsum('ji', c)
+ array([[0, 3],
+ [1, 4],
+ [2, 5]])
+ >>> np.einsum('ij->ji', c)
+ array([[0, 3],
+ [1, 4],
+ [2, 5]])
+ >>> np.einsum(c, [1,0])
+ array([[0, 3],
+ [1, 4],
+ [2, 5]])
+ >>> np.transpose(c)
+ array([[0, 3],
+ [1, 4],
+ [2, 5]])
+
+ Vector inner products:
+
+ >>> np.einsum('i,i', b, b)
+ 30
+ >>> np.einsum(b, [0], b, [0])
+ 30
+ >>> np.inner(b,b)
+ 30
+
+ Matrix vector multiplication:
+
+ >>> np.einsum('ij,j', a, b)
+ array([ 30, 80, 130, 180, 230])
+ >>> np.einsum(a, [0,1], b, [1])
+ array([ 30, 80, 130, 180, 230])
+ >>> np.dot(a, b)
+ array([ 30, 80, 130, 180, 230])
+ >>> np.einsum('...j,j', a, b)
+ array([ 30, 80, 130, 180, 230])
+
+ Broadcasting and scalar multiplication:
+
+ >>> np.einsum('..., ...', 3, c)
+ array([[ 0, 3, 6],
+ [ 9, 12, 15]])
+ >>> np.einsum(',ij', 3, c)
+ array([[ 0, 3, 6],
+ [ 9, 12, 15]])
+ >>> np.einsum(3, [Ellipsis], c, [Ellipsis])
+ array([[ 0, 3, 6],
+ [ 9, 12, 15]])
+ >>> np.multiply(3, c)
+ array([[ 0, 3, 6],
+ [ 9, 12, 15]])
+
+ Vector outer product:
+
+ >>> np.einsum('i,j', np.arange(2)+1, b)
+ array([[0, 1, 2, 3, 4],
+ [0, 2, 4, 6, 8]])
+ >>> np.einsum(np.arange(2)+1, [0], b, [1])
+ array([[0, 1, 2, 3, 4],
+ [0, 2, 4, 6, 8]])
+ >>> np.outer(np.arange(2)+1, b)
+ array([[0, 1, 2, 3, 4],
+ [0, 2, 4, 6, 8]])
+
+ Tensor contraction:
+
+ >>> a = np.arange(60.).reshape(3,4,5)
+ >>> b = np.arange(24.).reshape(4,3,2)
+ >>> np.einsum('ijk,jil->kl', a, b)
+ array([[ 4400., 4730.],
+ [ 4532., 4874.],
+ [ 4664., 5018.],
+ [ 4796., 5162.],
+ [ 4928., 5306.]])
+ >>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3])
+ array([[ 4400., 4730.],
+ [ 4532., 4874.],
+ [ 4664., 5018.],
+ [ 4796., 5162.],
+ [ 4928., 5306.]])
+ >>> np.tensordot(a,b, axes=([1,0],[0,1]))
+ array([[ 4400., 4730.],
+ [ 4532., 4874.],
+ [ 4664., 5018.],
+ [ 4796., 5162.],
+ [ 4928., 5306.]])
+
+ Writeable returned arrays (since version 1.10.0):
+
+ >>> a = np.zeros((3, 3))
+ >>> np.einsum('ii->i', a)[:] = 1
+ >>> a
+ array([[ 1., 0., 0.],
+ [ 0., 1., 0.],
+ [ 0., 0., 1.]])
+
+ Example of ellipsis use:
+
+ >>> a = np.arange(6).reshape((3,2))
+ >>> b = np.arange(12).reshape((4,3))
+ >>> np.einsum('ki,jk->ij', a, b)
+ array([[10, 28, 46, 64],
+ [13, 40, 67, 94]])
+ >>> np.einsum('ki,...k->i...', a, b)
+ array([[10, 28, 46, 64],
+ [13, 40, 67, 94]])
+ >>> np.einsum('k...,jk', a, b)
+ array([[10, 28, 46, 64],
+ [13, 40, 67, 94]])
+
+ """)
+
+
+##############################################################################
+#
+# Documentation for ndarray attributes and methods
+#
+##############################################################################
+
+
+##############################################################################
+#
+# ndarray object
+#
+##############################################################################
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray',
+ """
+ ndarray(shape, dtype=float, buffer=None, offset=0,
+ strides=None, order=None)
+
+ An array object represents a multidimensional, homogeneous array
+ of fixed-size items. An associated data-type object describes the
+ format of each element in the array (its byte-order, how many bytes it
+ occupies in memory, whether it is an integer, a floating point number,
+ or something else, etc.)
+
+ Arrays should be constructed using `array`, `zeros` or `empty` (refer
+ to the See Also section below). The parameters given here refer to
+ a low-level method (`ndarray(...)`) for instantiating an array.
+
+ For more information, refer to the `numpy` module and examine the
+ methods and attributes of an array.
+
+ Parameters
+ ----------
+ (for the __new__ method; see Notes below)
+
+ shape : tuple of ints
+ Shape of created array.
+ dtype : data-type, optional
+ Any object that can be interpreted as a numpy data type.
+ buffer : object exposing buffer interface, optional
+ Used to fill the array with data.
+ offset : int, optional
+ Offset of array data in buffer.
+ strides : tuple of ints, optional
+ Strides of data in memory.
+ order : {'C', 'F'}, optional
+ Row-major (C-style) or column-major (Fortran-style) order.
+
+ Attributes
+ ----------
+ T : ndarray
+ Transpose of the array.
+ data : buffer
+ The array's elements, in memory.
+ dtype : dtype object
+ Describes the format of the elements in the array.
+ flags : dict
+ Dictionary containing information related to memory use, e.g.,
+ 'C_CONTIGUOUS', 'OWNDATA', 'WRITEABLE', etc.
+ flat : numpy.flatiter object
+ Flattened version of the array as an iterator. The iterator
+ allows assignments, e.g., ``x.flat = 3`` (See `ndarray.flat` for
+ assignment examples; TODO).
+ imag : ndarray
+ Imaginary part of the array.
+ real : ndarray
+ Real part of the array.
+ size : int
+ Number of elements in the array.
+ itemsize : int
+ The memory use of each array element in bytes.
+ nbytes : int
+ The total number of bytes required to store the array data,
+ i.e., ``itemsize * size``.
+ ndim : int
+ The array's number of dimensions.
+ shape : tuple of ints
+ Shape of the array.
+ strides : tuple of ints
+ The step-size required to move from one element to the next in
+ memory. For example, a contiguous ``(3, 4)`` array of type
+ ``int16`` in C-order has strides ``(8, 2)``. This implies that
+ to move from element to element in memory requires jumps of 2 bytes.
+ To move from row-to-row, one needs to jump 8 bytes at a time
+ (``2 * 4``).
+ ctypes : ctypes object
+ Class containing properties of the array needed for interaction
+ with ctypes.
+ base : ndarray
+ If the array is a view into another array, that array is its `base`
+ (unless that array is also a view). The `base` array is where the
+ array data is actually stored.
+
+ See Also
+ --------
+ array : Construct an array.
+ zeros : Create an array, each element of which is zero.
+ empty : Create an array, but leave its allocated memory unchanged (i.e.,
+ it contains "garbage").
+ dtype : Create a data-type.
+
+ Notes
+ -----
+ There are two modes of creating an array using ``__new__``:
+
+ 1. If `buffer` is None, then only `shape`, `dtype`, and `order`
+ are used.
+ 2. If `buffer` is an object exposing the buffer interface, then
+ all keywords are interpreted.
+
+ No ``__init__`` method is needed because the array is fully initialized
+ after the ``__new__`` method.
+
+ Examples
+ --------
+ These examples illustrate the low-level `ndarray` constructor. Refer
+ to the `See Also` section above for easier ways of constructing an
+ ndarray.
+
+ First mode, `buffer` is None:
+
+ >>> np.ndarray(shape=(2,2), dtype=float, order='F')
+ array([[ -1.13698227e+002, 4.25087011e-303],
+ [ 2.88528414e-306, 3.27025015e-309]]) #random
+
+ Second mode:
+
+ >>> np.ndarray((2,), buffer=np.array([1,2,3]),
+ ... offset=np.int_().itemsize,
+ ... dtype=int) # offset = 1*itemsize, i.e. skip first element
+ array([2, 3])
+
+ """)
+
+
+##############################################################################
+#
+# ndarray attributes
+#
+##############################################################################
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_interface__',
+ """Array protocol: Python side."""))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_finalize__',
+ """None."""))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_priority__',
+ """Array priority."""))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_struct__',
+ """Array protocol: C-struct side."""))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('_as_parameter_',
+ """Allow the array to be interpreted as a ctypes object by returning the
+ data-memory location as an integer
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('base',
+ """
+ Base object if memory is from some other object.
+
+ Examples
+ --------
+ The base of an array that owns its memory is None:
+
+ >>> x = np.array([1,2,3,4])
+ >>> x.base is None
+ True
+
+ Slicing creates a view, whose memory is shared with x:
+
+ >>> y = x[2:]
+ >>> y.base is x
+ True
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('ctypes',
+ """
+ An object to simplify the interaction of the array with the ctypes
+ module.
+
+ This attribute creates an object that makes it easier to use arrays
+ when calling shared libraries with the ctypes module. The returned
+ object has, among others, data, shape, and strides attributes (see
+ Notes below) which themselves return ctypes objects that can be used
+ as arguments to a shared library.
+
+ Parameters
+ ----------
+ None
+
+ Returns
+ -------
+ c : Python object
+ Possessing attributes data, shape, strides, etc.
+
+ See Also
+ --------
+ numpy.ctypeslib
+
+ Notes
+ -----
+ Below are the public attributes of this object which were documented
+ in "Guide to NumPy" (we have omitted undocumented public attributes,
+ as well as documented private attributes):
+
+ .. autoattribute:: numpy.core._internal._ctypes.data
+
+ .. autoattribute:: numpy.core._internal._ctypes.shape
+
+ .. autoattribute:: numpy.core._internal._ctypes.strides
+
+ .. automethod:: numpy.core._internal._ctypes.data_as
+
+ .. automethod:: numpy.core._internal._ctypes.shape_as
+
+ .. automethod:: numpy.core._internal._ctypes.strides_as
+
+ If the ctypes module is not available, then the ctypes attribute
+ of array objects still returns something useful, but ctypes objects
+ are not returned and errors may be raised instead. In particular,
+ the object will still have the ``_as_parameter_`` attribute which will
+ return an integer equal to the data attribute.
+
+ Examples
+ --------
+ >>> import ctypes
+ >>> x
+ array([[0, 1],
+ [2, 3]])
+ >>> x.ctypes.data
+ 30439712
+ >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long))
+ <ctypes.LP_c_long object at 0x...>
+ >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long)).contents
+ c_long(0)
+ >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_longlong)).contents
+ c_longlong(4294967296L)
+ >>> x.ctypes.shape
+ <numpy.core._internal.c_long_Array_2 object at 0x...>
+ >>> x.ctypes.shape_as(ctypes.c_long)
+ <numpy.core._internal.c_long_Array_2 object at 0x...>
+ >>> x.ctypes.strides
+ <numpy.core._internal.c_long_Array_2 object at 0x...>
+ >>> x.ctypes.strides_as(ctypes.c_longlong)
+ <numpy.core._internal.c_longlong_Array_2 object at 0x...>
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('data',
+ """Python buffer object pointing to the start of the array's data."""))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('dtype',
+ """
+ Data-type of the array's elements.
+
+ Parameters
+ ----------
+ None
+
+ Returns
+ -------
+ d : numpy dtype object
+
+ See Also
+ --------
+ numpy.dtype
+
+ Examples
+ --------
+ >>> x
+ array([[0, 1],
+ [2, 3]])
+ >>> x.dtype
+ dtype('int32')
+ >>> type(x.dtype)
+ <type 'numpy.dtype'>
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('imag',
+ """
+ The imaginary part of the array.
+
+ Examples
+ --------
+ >>> x = np.sqrt([1+0j, 0+1j])
+ >>> x.imag
+ array([ 0. , 0.70710678])
+ >>> x.imag.dtype
+ dtype('float64')
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('itemsize',
+ """
+ Length of one array element in bytes.
+
+ Examples
+ --------
+ >>> x = np.array([1,2,3], dtype=np.float64)
+ >>> x.itemsize
+ 8
+ >>> x = np.array([1,2,3], dtype=np.complex128)
+ >>> x.itemsize
+ 16
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('flags',
+ """
+ Information about the memory layout of the array.
+
+ Attributes
+ ----------
+ C_CONTIGUOUS (C)
+ The data is in a single, C-style contiguous segment.
+ F_CONTIGUOUS (F)
+ The data is in a single, Fortran-style contiguous segment.
+ OWNDATA (O)
+ The array owns the memory it uses or borrows it from another object.
+ WRITEABLE (W)
+ The data area can be written to. Setting this to False locks
+ the data, making it read-only. A view (slice, etc.) inherits WRITEABLE
+ from its base array at creation time, but a view of a writeable
+ array may be subsequently locked while the base array remains writeable.
+ (The opposite is not true, in that a view of a locked array may not
+ be made writeable. However, currently, locking a base object does not
+ lock any views that already reference it, so under that circumstance it
+ is possible to alter the contents of a locked array via a previously
+ created writeable view onto it.) Attempting to change a non-writeable
+ array raises a RuntimeError exception.
+ ALIGNED (A)
+ The data and all elements are aligned appropriately for the hardware.
+ WRITEBACKIFCOPY (X)
+ This array is a copy of some other array. The C-API function
+ PyArray_ResolveWritebackIfCopy must be called before deallocating
+ this array, so that the base array is updated with its contents.
+ UPDATEIFCOPY (U)
+ (Deprecated, use WRITEBACKIFCOPY) This array is a copy of some other array.
+ When this array is
+ deallocated, the base array will be updated with the contents of
+ this array.
+ FNC
+ F_CONTIGUOUS and not C_CONTIGUOUS.
+ FORC
+ F_CONTIGUOUS or C_CONTIGUOUS (one-segment test).
+ BEHAVED (B)
+ ALIGNED and WRITEABLE.
+ CARRAY (CA)
+ BEHAVED and C_CONTIGUOUS.
+ FARRAY (FA)
+ BEHAVED and F_CONTIGUOUS and not C_CONTIGUOUS.
+
+ Notes
+ -----
+ The `flags` object can be accessed dictionary-like (as in ``a.flags['WRITEABLE']``),
+ or by using lowercased attribute names (as in ``a.flags.writeable``). Short flag
+ names are only supported in dictionary access.
+
+ Only the WRITEBACKIFCOPY, UPDATEIFCOPY, WRITEABLE, and ALIGNED flags can be
+ changed by the user, via direct assignment to the attribute or dictionary
+ entry, or by calling `ndarray.setflags`.
+
+ The array flags cannot be set arbitrarily:
+
+ - UPDATEIFCOPY can only be set ``False``.
+ - WRITEBACKIFCOPY can only be set ``False``.
+ - ALIGNED can only be set ``True`` if the data is truly aligned.
+ - WRITEABLE can only be set ``True`` if the array owns its own memory
+ or the ultimate owner of the memory exposes a writeable buffer
+ interface or is a string.
+
+ Arrays can be both C-style and Fortran-style contiguous simultaneously.
+ This is clear for 1-dimensional arrays, but can also be true for higher
+ dimensional arrays.
+
+ Even for contiguous arrays a stride for a given dimension
+ ``arr.strides[dim]`` may be *arbitrary* if ``arr.shape[dim] == 1``
+ or the array has no elements.
+ It does *not* generally hold that ``self.strides[-1] == self.itemsize``
+ for C-style contiguous arrays or ``self.strides[0] == self.itemsize`` for
+ Fortran-style contiguous arrays.
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('flat',
+ """
+ A 1-D iterator over the array.
+
+ This is a `numpy.flatiter` instance, which acts similarly to, but is not
+ a subclass of, Python's built-in iterator object.
+
+ See Also
+ --------
+ flatten : Return a copy of the array collapsed into one dimension.
+
+ flatiter
+
+ Examples
+ --------
+ >>> x = np.arange(1, 7).reshape(2, 3)
+ >>> x
+ array([[1, 2, 3],
+ [4, 5, 6]])
+ >>> x.flat[3]
+ 4
+ >>> x.T
+ array([[1, 4],
+ [2, 5],
+ [3, 6]])
+ >>> x.T.flat[3]
+ 5
+ >>> type(x.flat)
+ <type 'numpy.flatiter'>
+
+ An assignment example:
+
+ >>> x.flat = 3; x
+ array([[3, 3, 3],
+ [3, 3, 3]])
+ >>> x.flat[[1,4]] = 1; x
+ array([[3, 1, 3],
+ [3, 1, 3]])
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('nbytes',
+ """
+ Total bytes consumed by the elements of the array.
+
+ Notes
+ -----
+ Does not include memory consumed by non-element attributes of the
+ array object.
+
+ Examples
+ --------
+ >>> x = np.zeros((3,5,2), dtype=np.complex128)
+ >>> x.nbytes
+ 480
+ >>> np.prod(x.shape) * x.itemsize
+ 480
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('ndim',
+ """
+ Number of array dimensions.
+
+ Examples
+ --------
+ >>> x = np.array([1, 2, 3])
+ >>> x.ndim
+ 1
+ >>> y = np.zeros((2, 3, 4))
+ >>> y.ndim
+ 3
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('real',
+ """
+ The real part of the array.
+
+ Examples
+ --------
+ >>> x = np.sqrt([1+0j, 0+1j])
+ >>> x.real
+ array([ 1. , 0.70710678])
+ >>> x.real.dtype
+ dtype('float64')
+
+ See Also
+ --------
+ numpy.real : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('shape',
+ """
+ Tuple of array dimensions.
+
+ The shape property is usually used to get the current shape of an array,
+ but may also be used to reshape the array in-place by assigning a tuple of
+ array dimensions to it. As with `numpy.reshape`, one of the new shape
+ dimensions can be -1, in which case its value is inferred from the size of
+ the array and the remaining dimensions. Reshaping an array in-place will
+ fail if a copy is required.
+
+ Examples
+ --------
+ >>> x = np.array([1, 2, 3, 4])
+ >>> x.shape
+ (4,)
+ >>> y = np.zeros((2, 3, 4))
+ >>> y.shape
+ (2, 3, 4)
+ >>> y.shape = (3, 8)
+ >>> y
+ array([[ 0., 0., 0., 0., 0., 0., 0., 0.],
+ [ 0., 0., 0., 0., 0., 0., 0., 0.],
+ [ 0., 0., 0., 0., 0., 0., 0., 0.]])
+ >>> y.shape = (3, 6)
+ Traceback (most recent call last):
+ File "", line 1, in
+ ValueError: total size of new array must be unchanged
+ >>> np.zeros((4,2))[::2].shape = (-1,)
+ Traceback (most recent call last):
+ File "", line 1, in
+ AttributeError: incompatible shape for a non-contiguous array
+
+ See Also
+ --------
+ numpy.reshape : similar function
+ ndarray.reshape : similar method
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('size',
+ """
+ Number of elements in the array.
+
+ Equal to ``np.prod(a.shape)``, i.e., the product of the array's
+ dimensions.
+
+ Notes
+ -----
+ `a.size` returns a standard arbitrary precision Python integer. This
+ may not be the case with other methods of obtaining the same value
+ (like the suggested ``np.prod(a.shape)``, which returns an instance
+ of ``np.int_``), and may be relevant if the value is used further in
+ calculations that may overflow a fixed size integer type.
+
+ Examples
+ --------
+ >>> x = np.zeros((3, 5, 2), dtype=np.complex128)
+ >>> x.size
+ 30
+ >>> np.prod(x.shape)
+ 30
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('strides',
+ """
+ Tuple of bytes to step in each dimension when traversing an array.
+
+ The byte offset of element ``(i[0], i[1], ..., i[n])`` in an array `a`
+ is::
+
+ offset = sum(np.array(i) * a.strides)
+
+ A more detailed explanation of strides can be found in the
+ "ndarray.rst" file in the NumPy reference guide.
+
+ Notes
+ -----
+ Imagine an array of 32-bit integers (each 4 bytes)::
+
+ x = np.array([[0, 1, 2, 3, 4],
+ [5, 6, 7, 8, 9]], dtype=np.int32)
+
+ This array is stored in memory as 40 bytes, one after the other
+ (known as a contiguous block of memory). The strides of an array tell
+ us how many bytes we have to skip in memory to move to the next position
+ along a certain axis. For example, we have to skip 4 bytes (1 value) to
+ move to the next column, but 20 bytes (5 values) to get to the same
+ position in the next row. As such, the strides for the array `x` will be
+ ``(20, 4)``.
+
+ See Also
+ --------
+ numpy.lib.stride_tricks.as_strided
+
+ Examples
+ --------
+ >>> y = np.reshape(np.arange(2*3*4), (2,3,4))
+ >>> y
+ array([[[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11]],
+ [[12, 13, 14, 15],
+ [16, 17, 18, 19],
+ [20, 21, 22, 23]]])
+ >>> y.strides
+ (48, 16, 4)
+ >>> y[1,1,1]
+ 17
+ >>> offset=sum(y.strides * np.array((1,1,1)))
+ >>> offset/y.itemsize
+ 17
+
+ >>> x = np.reshape(np.arange(5*6*7*8), (5,6,7,8)).transpose(2,3,1,0)
+ >>> x.strides
+ (32, 4, 224, 1344)
+ >>> i = np.array([3,5,2,2])
+ >>> offset = sum(i * x.strides)
+ >>> x[3,5,2,2]
+ 813
+ >>> offset / x.itemsize
+ 813
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('T',
+ """
+ Same as self.transpose(), except that self is returned if
+ self.ndim < 2.
+
+ Examples
+ --------
+ >>> x = np.array([[1.,2.],[3.,4.]])
+ >>> x
+ array([[ 1., 2.],
+ [ 3., 4.]])
+ >>> x.T
+ array([[ 1., 3.],
+ [ 2., 4.]])
+ >>> x = np.array([1.,2.,3.,4.])
+ >>> x
+ array([ 1., 2., 3., 4.])
+ >>> x.T
+ array([ 1., 2., 3., 4.])
+
+ """))
+
+
+##############################################################################
+#
+# ndarray methods
+#
+##############################################################################
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__',
+ """ a.__array__(|dtype) -> reference if type unchanged, copy otherwise.
+
+ Returns either a new reference to self if dtype is not given or a new array
+ of provided data type if dtype is different from the current dtype of the
+ array.
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_prepare__',
+ """a.__array_prepare__(obj) -> Object of same type as ndarray object obj.
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_wrap__',
+ """a.__array_wrap__(obj) -> Object of same type as ndarray object a.
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__copy__',
+ """a.__copy__()
+
+ Used if :func:`copy.copy` is called on an array. Returns a copy of the array.
+
+ Equivalent to ``a.copy(order='K')``.
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__',
+ """a.__deepcopy__(memo, /) -> Deep copy of array.
+
+ Used if :func:`copy.deepcopy` is called on an array.
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__reduce__',
+ """a.__reduce__()
+
+ For pickling.
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__setstate__',
+ """a.__setstate__(state, /)
+
+ For unpickling.
+
+ The `state` argument must be a sequence that contains the following
+ elements:
+
+ Parameters
+ ----------
+ version : int
+ optional pickle version. If omitted defaults to 0.
+ shape : tuple
+ dtype : data-type
+ isFortran : bool
+ rawdata : string or list
+ a binary string with the data (or a list if 'a' is an object array)
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('all',
+ """
+ a.all(axis=None, out=None, keepdims=False)
+
+ Returns True if all elements evaluate to True.
+
+ Refer to `numpy.all` for full documentation.
+
+ See Also
+ --------
+ numpy.all : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('any',
+ """
+ a.any(axis=None, out=None, keepdims=False)
+
+ Returns True if any of the elements of `a` evaluate to True.
+
+ Refer to `numpy.any` for full documentation.
+
+ See Also
+ --------
+ numpy.any : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('argmax',
+ """
+ a.argmax(axis=None, out=None)
+
+ Return indices of the maximum values along the given axis.
+
+ Refer to `numpy.argmax` for full documentation.
+
+ See Also
+ --------
+ numpy.argmax : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('argmin',
+ """
+ a.argmin(axis=None, out=None)
+
+ Return indices of the minimum values along the given axis of `a`.
+
+ Refer to `numpy.argmin` for detailed documentation.
+
+ See Also
+ --------
+ numpy.argmin : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('argsort',
+ """
+ a.argsort(axis=-1, kind='quicksort', order=None)
+
+ Returns the indices that would sort this array.
+
+ Refer to `numpy.argsort` for full documentation.
+
+ See Also
+ --------
+ numpy.argsort : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('argpartition',
+ """
+ a.argpartition(kth, axis=-1, kind='introselect', order=None)
+
+ Returns the indices that would partition this array.
+
+ Refer to `numpy.argpartition` for full documentation.
+
+ .. versionadded:: 1.8.0
+
+ See Also
+ --------
+ numpy.argpartition : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('astype',
+ """
+ a.astype(dtype, order='K', casting='unsafe', subok=True, copy=True)
+
+ Copy of the array, cast to a specified type.
+
+ Parameters
+ ----------
+ dtype : str or dtype
+ Typecode or data-type to which the array is cast.
+ order : {'C', 'F', 'A', 'K'}, optional
+ Controls the memory layout order of the result.
+ 'C' means C order, 'F' means Fortran order, 'A'
+ means 'F' order if all the arrays are Fortran contiguous,
+ 'C' order otherwise, and 'K' means as close to the
+ order the array elements appear in memory as possible.
+ Default is 'K'.
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+ Controls what kind of data casting may occur. Defaults to 'unsafe'
+ for backwards compatibility.
+
+ * 'no' means the data types should not be cast at all.
+ * 'equiv' means only byte-order changes are allowed.
+ * 'safe' means only casts which can preserve values are allowed.
+ * 'same_kind' means only safe casts or casts within a kind,
+ like float64 to float32, are allowed.
+ * 'unsafe' means any data conversions may be done.
+ subok : bool, optional
+ If True, then sub-classes will be passed-through (default), otherwise
+ the returned array will be forced to be a base-class array.
+ copy : bool, optional
+ By default, astype always returns a newly allocated array. If this
+ is set to false, and the `dtype`, `order`, and `subok`
+ requirements are satisfied, the input array is returned instead
+ of a copy.
+
+ Returns
+ -------
+ arr_t : ndarray
+ Unless `copy` is False and the other conditions for returning the input
+ array are satisfied (see description for `copy` input parameter), `arr_t`
+ is a new array of the same shape as the input array, with dtype, order
+ given by `dtype`, `order`.
+
+ Notes
+ -----
+ Starting in NumPy 1.9, astype method now returns an error if the string
+ dtype to cast to is not long enough in 'safe' casting mode to hold the max
+ value of integer/float array that is being casted. Previously the casting
+ was allowed even if the result was truncated.
+
+ Raises
+ ------
+ ComplexWarning
+ When casting from complex to float or int. To avoid this,
+ one should use ``a.real.astype(t)``.
+
+ Examples
+ --------
+ >>> x = np.array([1, 2, 2.5])
+ >>> x
+ array([ 1. , 2. , 2.5])
+
+ >>> x.astype(int)
+ array([1, 2, 2])
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap',
+ """
+ a.byteswap(inplace=False)
+
+ Swap the bytes of the array elements
+
+ Toggle between little-endian and big-endian data representation by
+ returning a byteswapped array, optionally swapped in-place.
+
+ Parameters
+ ----------
+ inplace : bool, optional
+ If ``True``, swap bytes in-place, default is ``False``.
+
+ Returns
+ -------
+ out : ndarray
+ The byteswapped array. If `inplace` is ``True``, this is
+ a view to self.
+
+ Examples
+ --------
+ >>> A = np.array([1, 256, 8755], dtype=np.int16)
+ >>> list(map(hex, A))
+ ['0x1', '0x100', '0x2233']
+ >>> A.byteswap(inplace=True)
+ array([ 256, 1, 13090], dtype=int16)
+ >>> list(map(hex, A))
+ ['0x100', '0x1', '0x3322']
+
+ Arrays of strings are not swapped
+
+ >>> A = np.array(['ceg', 'fac'])
+ >>> A.byteswap()
+ array(['ceg', 'fac'],
+ dtype='|S3')
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('choose',
+ """
+ a.choose(choices, out=None, mode='raise')
+
+ Use an index array to construct a new array from a set of choices.
+
+ Refer to `numpy.choose` for full documentation.
+
+ See Also
+ --------
+ numpy.choose : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('clip',
+ """
+ a.clip(min=None, max=None, out=None)
+
+ Return an array whose values are limited to ``[min, max]``.
+ One of max or min must be given.
+
+ Refer to `numpy.clip` for full documentation.
+
+ See Also
+ --------
+ numpy.clip : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('compress',
+ """
+ a.compress(condition, axis=None, out=None)
+
+ Return selected slices of this array along given axis.
+
+ Refer to `numpy.compress` for full documentation.
+
+ See Also
+ --------
+ numpy.compress : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('conj',
+ """
+ a.conj()
+
+ Complex-conjugate all elements.
+
+ Refer to `numpy.conjugate` for full documentation.
+
+ See Also
+ --------
+ numpy.conjugate : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('conjugate',
+ """
+ a.conjugate()
+
+ Return the complex conjugate, element-wise.
+
+ Refer to `numpy.conjugate` for full documentation.
+
+ See Also
+ --------
+ numpy.conjugate : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('copy',
+ """
+ a.copy(order='C')
+
+ Return a copy of the array.
+
+ Parameters
+ ----------
+ order : {'C', 'F', 'A', 'K'}, optional
+ Controls the memory layout of the copy. 'C' means C-order,
+ 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
+ 'C' otherwise. 'K' means match the layout of `a` as closely
+ as possible. (Note that this function and :func:`numpy.copy` are very
+ similar, but have different default values for their order=
+ arguments.)
+
+ See also
+ --------
+ numpy.copy
+ numpy.copyto
+
+ Examples
+ --------
+ >>> x = np.array([[1,2,3],[4,5,6]], order='F')
+
+ >>> y = x.copy()
+
+ >>> x.fill(0)
+
+ >>> x
+ array([[0, 0, 0],
+ [0, 0, 0]])
+
+ >>> y
+ array([[1, 2, 3],
+ [4, 5, 6]])
+
+ >>> y.flags['C_CONTIGUOUS']
+ True
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('cumprod',
+ """
+ a.cumprod(axis=None, dtype=None, out=None)
+
+ Return the cumulative product of the elements along the given axis.
+
+ Refer to `numpy.cumprod` for full documentation.
+
+ See Also
+ --------
+ numpy.cumprod : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('cumsum',
+ """
+ a.cumsum(axis=None, dtype=None, out=None)
+
+ Return the cumulative sum of the elements along the given axis.
+
+ Refer to `numpy.cumsum` for full documentation.
+
+ See Also
+ --------
+ numpy.cumsum : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('diagonal',
+ """
+ a.diagonal(offset=0, axis1=0, axis2=1)
+
+ Return specified diagonals. In NumPy 1.9 the returned array is a
+ read-only view instead of a copy as in previous NumPy versions. In
+ a future version the read-only restriction will be removed.
+
+ Refer to :func:`numpy.diagonal` for full documentation.
+
+ See Also
+ --------
+ numpy.diagonal : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('dot',
+ """
+ a.dot(b, out=None)
+
+ Dot product of two arrays.
+
+ Refer to `numpy.dot` for full documentation.
+
+ See Also
+ --------
+ numpy.dot : equivalent function
+
+ Examples
+ --------
+ >>> a = np.eye(2)
+ >>> b = np.ones((2, 2)) * 2
+ >>> a.dot(b)
+ array([[ 2., 2.],
+ [ 2., 2.]])
+
+ This array method can be conveniently chained:
+
+ >>> a.dot(b).dot(b)
+ array([[ 8., 8.],
+ [ 8., 8.]])
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('dump',
+ """a.dump(file)
+
+ Dump a pickle of the array to the specified file.
+ The array can be read back with pickle.load or numpy.load.
+
+ Parameters
+ ----------
+ file : str
+ A string naming the dump file.
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('dumps',
+ """
+ a.dumps()
+
+ Returns the pickle of the array as a string.
+ pickle.loads or numpy.loads will convert the string back to an array.
+
+ Parameters
+ ----------
+ None
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('fill',
+ """
+ a.fill(value)
+
+ Fill the array with a scalar value.
+
+ Parameters
+ ----------
+ value : scalar
+ All elements of `a` will be assigned this value.
+
+ Examples
+ --------
+ >>> a = np.array([1, 2])
+ >>> a.fill(0)
+ >>> a
+ array([0, 0])
+ >>> a = np.empty(2)
+ >>> a.fill(1)
+ >>> a
+ array([ 1., 1.])
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('flatten',
+ """
+ a.flatten(order='C')
+
+ Return a copy of the array collapsed into one dimension.
+
+ Parameters
+ ----------
+ order : {'C', 'F', 'A', 'K'}, optional
+ 'C' means to flatten in row-major (C-style) order.
+ 'F' means to flatten in column-major (Fortran-
+ style) order. 'A' means to flatten in column-major
+ order if `a` is Fortran *contiguous* in memory,
+ row-major order otherwise. 'K' means to flatten
+ `a` in the order the elements occur in memory.
+ The default is 'C'.
+
+ Returns
+ -------
+ y : ndarray
+ A copy of the input array, flattened to one dimension.
+
+ See Also
+ --------
+ ravel : Return a flattened array.
+ flat : A 1-D flat iterator over the array.
+
+ Examples
+ --------
+ >>> a = np.array([[1,2], [3,4]])
+ >>> a.flatten()
+ array([1, 2, 3, 4])
+ >>> a.flatten('F')
+ array([1, 3, 2, 4])
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('getfield',
+ """
+ a.getfield(dtype, offset=0)
+
+ Returns a field of the given array as a certain type.
+
+ A field is a view of the array data with a given data-type. The values in
+ the view are determined by the given type and the offset into the current
+ array in bytes. The offset needs to be such that the view dtype fits in the
+ array dtype; for example an array of dtype complex128 has 16-byte elements.
+ If taking a view with a 32-bit integer (4 bytes), the offset needs to be
+ between 0 and 12 bytes.
+
+ Parameters
+ ----------
+ dtype : str or dtype
+ The data type of the view. The dtype size of the view can not be larger
+ than that of the array itself.
+ offset : int
+ Number of bytes to skip before beginning the element view.
+
+ Examples
+ --------
+ >>> x = np.diag([1.+1.j]*2)
+ >>> x[1, 1] = 2 + 4.j
+ >>> x
+ array([[ 1.+1.j, 0.+0.j],
+ [ 0.+0.j, 2.+4.j]])
+ >>> x.getfield(np.float64)
+ array([[ 1., 0.],
+ [ 0., 2.]])
+
+ By choosing an offset of 8 bytes we can select the complex part of the
+ array for our view:
+
+ >>> x.getfield(np.float64, offset=8)
+ array([[ 1., 0.],
+ [ 0., 4.]])
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('item',
+ """
+ a.item(*args)
+
+ Copy an element of an array to a standard Python scalar and return it.
+
+ Parameters
+ ----------
+ \\*args : Arguments (variable number and type)
+
+ * none: in this case, the method only works for arrays
+ with one element (`a.size == 1`), which element is
+ copied into a standard Python scalar object and returned.
+
+ * int_type: this argument is interpreted as a flat index into
+ the array, specifying which element to copy and return.
+
+ * tuple of int_types: functions as does a single int_type argument,
+ except that the argument is interpreted as an nd-index into the
+ array.
+
+ Returns
+ -------
+ z : Standard Python scalar object
+ A copy of the specified element of the array as a suitable
+ Python scalar
+
+ Notes
+ -----
+ When the data type of `a` is longdouble or clongdouble, item() returns
+ a scalar array object because there is no available Python scalar that
+ would not lose information. Void arrays return a buffer object for item(),
+ unless fields are defined, in which case a tuple is returned.
+
+ `item` is very similar to a[args], except, instead of an array scalar,
+ a standard Python scalar is returned. This can be useful for speeding up
+ access to elements of the array and doing arithmetic on elements of the
+ array using Python's optimized math.
+
+ Examples
+ --------
+ >>> x = np.random.randint(9, size=(3, 3))
+ >>> x
+ array([[3, 1, 7],
+ [2, 8, 3],
+ [8, 5, 3]])
+ >>> x.item(3)
+ 2
+ >>> x.item(7)
+ 5
+ >>> x.item((0, 1))
+ 1
+ >>> x.item((2, 2))
+ 3
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('itemset',
+ """
+ a.itemset(*args)
+
+ Insert scalar into an array (scalar is cast to array's dtype, if possible)
+
+ There must be at least 1 argument, and the last argument is defined
+ as *item*. Then, ``a.itemset(*args)`` is equivalent to but faster
+ than ``a[args] = item``. The item should be a scalar value and `args`
+ must select a single item in the array `a`.
+
+ Parameters
+ ----------
+ \\*args : Arguments
+ If one argument: a scalar, only used in case `a` is of size 1.
+ If two arguments: the last argument is the value to be set
+ and must be a scalar, the first argument specifies a single array
+ element location. It is either an int or a tuple.
+
+ Notes
+ -----
+ Compared to indexing syntax, `itemset` provides some speed increase
+ for placing a scalar into a particular location in an `ndarray`,
+ if you must do this. However, generally this is discouraged:
+ among other problems, it complicates the appearance of the code.
+ Also, when using `itemset` (and `item`) inside a loop, be sure
+ to assign the methods to a local variable to avoid the attribute
+ look-up at each loop iteration.
+
+ Examples
+ --------
+ >>> x = np.random.randint(9, size=(3, 3))
+ >>> x
+ array([[3, 1, 7],
+ [2, 8, 3],
+ [8, 5, 3]])
+ >>> x.itemset(4, 0)
+ >>> x.itemset((2, 2), 9)
+ >>> x
+ array([[3, 1, 7],
+ [2, 0, 3],
+ [8, 5, 9]])
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('max',
+ """
+ a.max(axis=None, out=None, keepdims=False)
+
+ Return the maximum along a given axis.
+
+ Refer to `numpy.amax` for full documentation.
+
+ See Also
+ --------
+ numpy.amax : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('mean',
+ """
+ a.mean(axis=None, dtype=None, out=None, keepdims=False)
+
+ Returns the average of the array elements along given axis.
+
+ Refer to `numpy.mean` for full documentation.
+
+ See Also
+ --------
+ numpy.mean : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('min',
+ """
+ a.min(axis=None, out=None, keepdims=False)
+
+ Return the minimum along a given axis.
+
+ Refer to `numpy.amin` for full documentation.
+
+ See Also
+ --------
+ numpy.amin : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'shares_memory',
+ """
+ shares_memory(a, b, max_work=None)
+
+ Determine if two arrays share memory
+
+ Parameters
+ ----------
+ a, b : ndarray
+ Input arrays
+ max_work : int, optional
+ Effort to spend on solving the overlap problem (maximum number
+ of candidate solutions to consider). The following special
+ values are recognized:
+
+ max_work=MAY_SHARE_EXACT (default)
+ The problem is solved exactly. In this case, the function returns
+ True only if there is an element shared between the arrays.
+ max_work=MAY_SHARE_BOUNDS
+ Only the memory bounds of a and b are checked.
+
+ Raises
+ ------
+ numpy.TooHardError
+ Exceeded max_work.
+
+ Returns
+ -------
+ out : bool
+
+ See Also
+ --------
+ may_share_memory
+
+ Examples
+ --------
+ >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
+ False
+
+ """)
+
+
+add_newdoc('numpy.core.multiarray', 'may_share_memory',
+ """
+ may_share_memory(a, b, max_work=None)
+
+ Determine if two arrays might share memory
+
+ A return of True does not necessarily mean that the two arrays
+ share any element. It just means that they *might*.
+
+ Only the memory bounds of a and b are checked by default.
+
+ Parameters
+ ----------
+ a, b : ndarray
+ Input arrays
+ max_work : int, optional
+ Effort to spend on solving the overlap problem. See
+ `shares_memory` for details. Default for ``may_share_memory``
+ is to do a bounds check.
+
+ Returns
+ -------
+ out : bool
+
+ See Also
+ --------
+ shares_memory
+
+ Examples
+ --------
+ >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
+ False
+ >>> x = np.zeros([3, 4])
+ >>> np.may_share_memory(x[:,0], x[:,1])
+ True
+
+ """)
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder',
+ """
+ arr.newbyteorder(new_order='S')
+
+ Return the array with the same data viewed with a different byte order.
+
+ Equivalent to::
+
+ arr.view(arr.dtype.newbyteorder(new_order))
+
+ Changes are also made in all fields and sub-arrays of the array data
+ type.
+
+
+
+ Parameters
+ ----------
+ new_order : string, optional
+ Byte order to force; a value from the byte order specifications
+ below. `new_order` codes can be any of:
+
+ * 'S' - swap dtype from current to opposite endian
+ * {'<', 'L'} - little endian
+ * {'>', 'B'} - big endian
+ * {'=', 'N'} - native order
+ * {'|', 'I'} - ignore (no change to byte order)
+
+ The default value ('S') results in swapping the current
+ byte order. The code does a case-insensitive check on the first
+ letter of `new_order` for the alternatives above. For example,
+ any of 'B' or 'b' or 'biggish' are valid to specify big-endian.
+
+
+ Returns
+ -------
+ new_arr : array
+ New array object with the dtype reflecting given change to the
+ byte order.
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('nonzero',
+ """
+ a.nonzero()
+
+ Return the indices of the elements that are non-zero.
+
+ Refer to `numpy.nonzero` for full documentation.
+
+ See Also
+ --------
+ numpy.nonzero : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('prod',
+ """
+ a.prod(axis=None, dtype=None, out=None, keepdims=False)
+
+ Return the product of the array elements over the given axis
+
+ Refer to `numpy.prod` for full documentation.
+
+ See Also
+ --------
+ numpy.prod : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('ptp',
+ """
+ a.ptp(axis=None, out=None, keepdims=False)
+
+ Peak to peak (maximum - minimum) value along a given axis.
+
+ Refer to `numpy.ptp` for full documentation.
+
+ See Also
+ --------
+ numpy.ptp : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('put',
+ """
+ a.put(indices, values, mode='raise')
+
+ Set ``a.flat[n] = values[n]`` for all `n` in indices.
+
+ Refer to `numpy.put` for full documentation.
+
+ See Also
+ --------
+ numpy.put : equivalent function
+
+ """))
+
+add_newdoc('numpy.core.multiarray', 'copyto',
+ """
+ copyto(dst, src, casting='same_kind', where=True)
+
+ Copies values from one array to another, broadcasting as necessary.
+
+ Raises a TypeError if the `casting` rule is violated, and if
+ `where` is provided, it selects which elements to copy.
+
+ .. versionadded:: 1.7.0
+
+ Parameters
+ ----------
+ dst : ndarray
+ The array into which values are copied.
+ src : array_like
+ The array from which values are copied.
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+ Controls what kind of data casting may occur when copying.
+
+ * 'no' means the data types should not be cast at all.
+ * 'equiv' means only byte-order changes are allowed.
+ * 'safe' means only casts which can preserve values are allowed.
+ * 'same_kind' means only safe casts or casts within a kind,
+ like float64 to float32, are allowed.
+ * 'unsafe' means any data conversions may be done.
+ where : array_like of bool, optional
+ A boolean array which is broadcasted to match the dimensions
+ of `dst`, and selects elements to copy from `src` to `dst`
+ wherever it contains the value True.
+
+ """)
+
+add_newdoc('numpy.core.multiarray', 'putmask',
+ """
+ putmask(a, mask, values)
+
+ Changes elements of an array based on conditional and input values.
+
+ Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``.
+
+ If `values` is not the same size as `a` and `mask` then it will repeat.
+ This gives behavior different from ``a[mask] = values``.
+
+ Parameters
+ ----------
+ a : array_like
+ Target array.
+ mask : array_like
+ Boolean mask array. It has to be the same shape as `a`.
+ values : array_like
+ Values to put into `a` where `mask` is True. If `values` is smaller
+ than `a` it will be repeated.
+
+ See Also
+ --------
+ place, put, take, copyto
+
+ Examples
+ --------
+ >>> x = np.arange(6).reshape(2, 3)
+ >>> np.putmask(x, x>2, x**2)
+ >>> x
+ array([[ 0, 1, 2],
+ [ 9, 16, 25]])
+
+ If `values` is smaller than `a` it is repeated:
+
+ >>> x = np.arange(5)
+ >>> np.putmask(x, x>1, [-33, -44])
+ >>> x
+ array([ 0, 1, -33, -44, -33])
+
+ """)
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('ravel',
+ """
+ a.ravel([order])
+
+ Return a flattened array.
+
+ Refer to `numpy.ravel` for full documentation.
+
+ See Also
+ --------
+ numpy.ravel : equivalent function
+
+ ndarray.flat : a flat iterator on the array.
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('repeat',
+ """
+ a.repeat(repeats, axis=None)
+
+ Repeat elements of an array.
+
+ Refer to `numpy.repeat` for full documentation.
+
+ See Also
+ --------
+ numpy.repeat : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('reshape',
+ """
+ a.reshape(shape, order='C')
+
+ Returns an array containing the same data with a new shape.
+
+ Refer to `numpy.reshape` for full documentation.
+
+ See Also
+ --------
+ numpy.reshape : equivalent function
+
+ Notes
+ -----
+ Unlike the free function `numpy.reshape`, this method on `ndarray` allows
+ the elements of the shape parameter to be passed in as separate arguments.
+ For example, ``a.reshape(10, 11)`` is equivalent to
+ ``a.reshape((10, 11))``.
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('resize',
+ """
+ a.resize(new_shape, refcheck=True)
+
+ Change shape and size of array in-place.
+
+ Parameters
+ ----------
+ new_shape : tuple of ints, or `n` ints
+ Shape of resized array.
+ refcheck : bool, optional
+ If False, reference count will not be checked. Default is True.
+
+ Returns
+ -------
+ None
+
+ Raises
+ ------
+ ValueError
+ If `a` does not own its own data or references or views to it exist,
+ and the data memory must be changed.
+ PyPy only: will always raise if the data memory must be changed, since
+ there is no reliable way to determine if references or views to it
+ exist.
+
+ SystemError
+ If the `order` keyword argument is specified. This behaviour is a
+ bug in NumPy.
+
+ See Also
+ --------
+ resize : Return a new array with the specified shape.
+
+ Notes
+ -----
+ This reallocates space for the data area if necessary.
+
+ Only contiguous arrays (data elements consecutive in memory) can be
+ resized.
+
+ The purpose of the reference count check is to make sure you
+ do not use this array as a buffer for another Python object and then
+ reallocate the memory. However, reference counts can increase in
+ other ways so if you are sure that you have not shared the memory
+ for this array with another Python object, then you may safely set
+ `refcheck` to False.
+
+ Examples
+ --------
+ Shrinking an array: array is flattened (in the order that the data are
+ stored in memory), resized, and reshaped:
+
+ >>> a = np.array([[0, 1], [2, 3]], order='C')
+ >>> a.resize((2, 1))
+ >>> a
+ array([[0],
+ [1]])
+
+ >>> a = np.array([[0, 1], [2, 3]], order='F')
+ >>> a.resize((2, 1))
+ >>> a
+ array([[0],
+ [2]])
+
+ Enlarging an array: as above, but missing entries are filled with zeros:
+
+ >>> b = np.array([[0, 1], [2, 3]])
+ >>> b.resize(2, 3) # new_shape parameter doesn't have to be a tuple
+ >>> b
+ array([[0, 1, 2],
+ [3, 0, 0]])
+
+ Referencing an array prevents resizing...
+
+ >>> c = a
+ >>> a.resize((1, 1))
+ Traceback (most recent call last):
+ ...
+ ValueError: cannot resize an array that has been referenced ...
+
+ Unless `refcheck` is False:
+
+ >>> a.resize((1, 1), refcheck=False)
+ >>> a
+ array([[0]])
+ >>> c
+ array([[0]])
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('round',
+ """
+ a.round(decimals=0, out=None)
+
+ Return `a` with each element rounded to the given number of decimals.
+
+ Refer to `numpy.around` for full documentation.
+
+ See Also
+ --------
+ numpy.around : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('searchsorted',
+ """
+ a.searchsorted(v, side='left', sorter=None)
+
+ Find indices where elements of v should be inserted in a to maintain order.
+
+ For full documentation, see `numpy.searchsorted`
+
+ See Also
+ --------
+ numpy.searchsorted : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('setfield',
+ """
+ a.setfield(val, dtype, offset=0)
+
+ Put a value into a specified place in a field defined by a data-type.
+
+ Place `val` into `a`'s field defined by `dtype` and beginning `offset`
+ bytes into the field.
+
+ Parameters
+ ----------
+ val : object
+ Value to be placed in field.
+ dtype : dtype object
+ Data-type of the field in which to place `val`.
+ offset : int, optional
+ The number of bytes into the field at which to place `val`.
+
+ Returns
+ -------
+ None
+
+ See Also
+ --------
+ getfield
+
+ Examples
+ --------
+ >>> x = np.eye(3)
+ >>> x.getfield(np.float64)
+ array([[ 1., 0., 0.],
+ [ 0., 1., 0.],
+ [ 0., 0., 1.]])
+ >>> x.setfield(3, np.int32)
+ >>> x.getfield(np.int32)
+ array([[3, 3, 3],
+ [3, 3, 3],
+ [3, 3, 3]])
+ >>> x
+ array([[ 1.00000000e+000, 1.48219694e-323, 1.48219694e-323],
+ [ 1.48219694e-323, 1.00000000e+000, 1.48219694e-323],
+ [ 1.48219694e-323, 1.48219694e-323, 1.00000000e+000]])
+ >>> x.setfield(np.eye(3), np.int32)
+ >>> x
+ array([[ 1., 0., 0.],
+ [ 0., 1., 0.],
+ [ 0., 0., 1.]])
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags',
+ """
+ a.setflags(write=None, align=None, uic=None)
+
+ Set array flags WRITEABLE, ALIGNED, (WRITEBACKIFCOPY and UPDATEIFCOPY),
+ respectively.
+
+ These Boolean-valued flags affect how numpy interprets the memory
+ area used by `a` (see Notes below). The ALIGNED flag can only
+ be set to True if the data is actually aligned according to the type.
+ The WRITEBACKIFCOPY and (deprecated) UPDATEIFCOPY flags can never be set
+ to True. The flag WRITEABLE can only be set to True if the array owns its
+ own memory, or the ultimate owner of the memory exposes a writeable buffer
+ interface, or is a string. (The exception for string is made so that
+ unpickling can be done without copying memory.)
+
+ Parameters
+ ----------
+ write : bool, optional
+ Describes whether or not `a` can be written to.
+ align : bool, optional
+ Describes whether or not `a` is aligned properly for its type.
+ uic : bool, optional
+ Describes whether or not `a` is a copy of another "base" array.
+
+ Notes
+ -----
+ Array flags provide information about how the memory area used
+ for the array is to be interpreted. There are 7 Boolean flags
+ in use, only four of which can be changed by the user:
+ WRITEBACKIFCOPY, UPDATEIFCOPY, WRITEABLE, and ALIGNED.
+
+ WRITEABLE (W) the data area can be written to;
+
+ ALIGNED (A) the data and strides are aligned appropriately for the hardware
+ (as determined by the compiler);
+
+ UPDATEIFCOPY (U) (deprecated), replaced by WRITEBACKIFCOPY;
+
+ WRITEBACKIFCOPY (X) this array is a copy of some other array (referenced
+ by .base). When the C-API function PyArray_ResolveWritebackIfCopy is
+ called, the base array will be updated with the contents of this array.
+
+ All flags can be accessed using the single (upper case) letter as well
+ as the full name.
+
+ Examples
+ --------
+ >>> y = np.array([[3, 1, 7],
+ ... [2, 0, 0],
+ ... [8, 5, 9]])
+ >>> y.flags
+ C_CONTIGUOUS : True
+ F_CONTIGUOUS : False
+ OWNDATA : True
+ WRITEABLE : True
+ ALIGNED : True
+ WRITEBACKIFCOPY : False
+ UPDATEIFCOPY : False
+ >>> y.setflags(write=0, align=0)
+ >>> y.flags
+ C_CONTIGUOUS : True
+ F_CONTIGUOUS : False
+ OWNDATA : True
+ WRITEABLE : False
+ ALIGNED : False
+ WRITEBACKIFCOPY : False
+ UPDATEIFCOPY : False
+ >>> y.setflags(uic=1)
+ Traceback (most recent call last):
+ File "", line 1, in
+ ValueError: cannot set WRITEBACKIFCOPY flag to True
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('sort',
+ """
+ a.sort(axis=-1, kind='quicksort', order=None)
+
+ Sort an array, in-place.
+
+ Parameters
+ ----------
+ axis : int, optional
+ Axis along which to sort. Default is -1, which means sort along the
+ last axis.
+ kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
+ Sorting algorithm. Default is 'quicksort'.
+ order : str or list of str, optional
+ When `a` is an array with fields defined, this argument specifies
+ which fields to compare first, second, etc. A single field can
+ be specified as a string, and not all fields need be specified,
+ but unspecified fields will still be used, in the order in which
+ they come up in the dtype, to break ties.
+
+ See Also
+ --------
+ numpy.sort : Return a sorted copy of an array.
+ argsort : Indirect sort.
+ lexsort : Indirect stable sort on multiple keys.
+ searchsorted : Find elements in sorted array.
+ partition: Partial sort.
+
+ Notes
+ -----
+ See ``sort`` for notes on the different sorting algorithms.
+
+ Examples
+ --------
+ >>> a = np.array([[1,4], [3,1]])
+ >>> a.sort(axis=1)
+ >>> a
+ array([[1, 4],
+ [1, 3]])
+ >>> a.sort(axis=0)
+ >>> a
+ array([[1, 3],
+ [1, 4]])
+
+ Use the `order` keyword to specify a field to use when sorting a
+ structured array:
+
+ >>> a = np.array([('a', 2), ('c', 1)], dtype=[('x', 'S1'), ('y', int)])
+ >>> a.sort(order='y')
+ >>> a
+ array([('c', 1), ('a', 2)],
+ dtype=[('x', '|S1'), ('y', '<i8')])
+
+ """))
+
+
+ add_newdoc('numpy.core.multiarray', 'ndarray', ('partition',
+ """
+ a.partition(kth, axis=-1, kind='introselect', order=None)
+
+ Rearranges the elements in the array in such a way that the value of
+ the element in kth position is in the position it would be in a sorted
+ array. All elements smaller than the kth element are moved before this
+ element and all equal or greater are moved behind it. The ordering of
+ the elements in the two partitions is undefined.
+
+ .. versionadded:: 1.8.0
+
+ Refer to `numpy.partition` for full documentation.
+
+ See Also
+ --------
+ numpy.partition : Return a partitioned copy of an array.
+ argpartition : Indirect partition.
+ sort : Full sort.
+
+ Examples
+ --------
+ >>> a = np.array([3, 4, 2, 1])
+ >>> a.partition(3)
+ >>> a
+ array([2, 1, 3, 4])
+
+ >>> a.partition((1, 3))
+ >>> a
+ array([1, 2, 3, 4])
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('squeeze',
+ """
+ a.squeeze(axis=None)
+
+ Remove single-dimensional entries from the shape of `a`.
+
+ Refer to `numpy.squeeze` for full documentation.
+
+ See Also
+ --------
+ numpy.squeeze : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('std',
+ """
+ a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False)
+
+ Returns the standard deviation of the array elements along given axis.
+
+ Refer to `numpy.std` for full documentation.
+
+ See Also
+ --------
+ numpy.std : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('sum',
+ """
+ a.sum(axis=None, dtype=None, out=None, keepdims=False)
+
+ Return the sum of the array elements over the given axis.
+
+ Refer to `numpy.sum` for full documentation.
+
+ See Also
+ --------
+ numpy.sum : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('swapaxes',
+ """
+ a.swapaxes(axis1, axis2)
+
+ Return a view of the array with `axis1` and `axis2` interchanged.
+
+ Refer to `numpy.swapaxes` for full documentation.
+
+ See Also
+ --------
+ numpy.swapaxes : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('take',
+ """
+ a.take(indices, axis=None, out=None, mode='raise')
+
+ Return an array formed from the elements of `a` at the given indices.
+
+ Refer to `numpy.take` for full documentation.
+
+ See Also
+ --------
+ numpy.take : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('tofile',
+ """
+ a.tofile(fid, sep="", format="%s")
+
+ Write array to a file as text or binary (default).
+
+ Data is always written in 'C' order, independent of the order of `a`.
+ The data produced by this method can be recovered using the function
+ fromfile().
+
+ Parameters
+ ----------
+ fid : file or str
+ An open file object, or a string containing a filename.
+ sep : str
+ Separator between array items for text output.
+ If "" (empty), a binary file is written, equivalent to
+ ``file.write(a.tobytes())``.
+ format : str
+ Format string for text file output.
+ Each entry in the array is formatted to text by first converting
+ it to the closest Python type, and then using "format" % item.
+
+ Notes
+ -----
+ This is a convenience function for quick storage of array data.
+ Information on endianness and precision is lost, so this method is not a
+ good choice for files intended to archive data or transport data between
+ machines with different endianness. Some of these problems can be overcome
+ by outputting the data as text files, at the expense of speed and file
+ size.
+
+ When fid is a file object, array contents are directly written to the
+ file, bypassing the file object's ``write`` method. As a result, tofile
+ cannot be used with file objects supporting compression (e.g., GzipFile)
+ or file-like objects that do not support ``fileno()`` (e.g., BytesIO).
+
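+ Examples
+ --------
+ An illustrative round trip; ``data.bin`` is an arbitrary scratch file,
+ and the dtype must be given again when reading back, since the file
+ stores no metadata:
+
+ >>> a = np.arange(4, dtype=np.int32)
+ >>> a.tofile('data.bin')
+ >>> np.fromfile('data.bin', dtype=np.int32)
+ array([0, 1, 2, 3], dtype=int32)
+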
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist',
+ """
+ a.tolist()
+
+ Return the array as a (possibly nested) list.
+
+ Return a copy of the array data as a (nested) Python list.
+ Data items are converted to the nearest compatible Python type.
+
+ Parameters
+ ----------
+ none
+
+ Returns
+ -------
+ y : list
+ The possibly nested list of array elements.
+
+ Notes
+ -----
+ The array may be recreated, ``a = np.array(a.tolist())``.
+
+ Examples
+ --------
+ >>> a = np.array([1, 2])
+ >>> a.tolist()
+ [1, 2]
+ >>> a = np.array([[1, 2], [3, 4]])
+ >>> list(a)
+ [array([1, 2]), array([3, 4])]
+ >>> a.tolist()
+ [[1, 2], [3, 4]]
+
+ """))
+
+
+tobytesdoc = """
+ a.{name}(order='C')
+
+ Construct Python bytes containing the raw data bytes in the array.
+
+ Constructs Python bytes showing a copy of the raw contents of
+ data memory. The bytes object can be produced in either 'C' or 'Fortran',
+ or 'Any' order (the default is 'C'-order). 'Any' order means C-order
+ unless the F_CONTIGUOUS flag in the array is set, in which case it
+ means 'Fortran' order.
+
+ {deprecated}
+
+ Parameters
+ ----------
+ order : {{'C', 'F', None}}, optional
+ Order of the data for multidimensional arrays:
+ C, Fortran, or the same as for the original array.
+
+ Returns
+ -------
+ s : bytes
+ Python bytes exhibiting a copy of `a`'s raw data.
+
+ Examples
+ --------
+ >>> x = np.array([[0, 1], [2, 3]])
+ >>> x.tobytes()
+ b'\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x03\\x00\\x00\\x00'
+ >>> x.tobytes('C') == x.tobytes()
+ True
+ >>> x.tobytes('F')
+ b'\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x03\\x00\\x00\\x00'
+
+ """
+
+add_newdoc('numpy.core.multiarray', 'ndarray',
+ ('tostring', tobytesdoc.format(name='tostring',
+ deprecated=
+ 'This function is a compatibility '
+ 'alias for tobytes. Despite its '
+ 'name it returns bytes not '
+ 'strings.')))
+add_newdoc('numpy.core.multiarray', 'ndarray',
+ ('tobytes', tobytesdoc.format(name='tobytes',
+ deprecated='.. versionadded:: 1.9.0')))
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('trace',
+ """
+ a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None)
+
+ Return the sum along diagonals of the array.
+
+ Refer to `numpy.trace` for full documentation.
+
+ See Also
+ --------
+ numpy.trace : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('transpose',
+ """
+ a.transpose(*axes)
+
+ Returns a view of the array with axes transposed.
+
+ For a 1-D array, this has no effect. (To change between column and
+ row vectors, first cast the 1-D array into a matrix object.)
+ For a 2-D array, this is the usual matrix transpose.
+ For an n-D array, if axes are given, their order indicates how the
+ axes are permuted (see Examples). If axes are not provided and
+ ``a.shape = (i[0], i[1], ... i[n-2], i[n-1])``, then
+ ``a.transpose().shape = (i[n-1], i[n-2], ... i[1], i[0])``.
+
+ Parameters
+ ----------
+ axes : None, tuple of ints, or `n` ints
+
+ * None or no argument: reverses the order of the axes.
+
+ * tuple of ints: `i` in the `j`-th place in the tuple means `a`'s
+ `i`-th axis becomes `a.transpose()`'s `j`-th axis.
+
+ * `n` ints: same as an n-tuple of the same ints (this form is
+ intended simply as a "convenience" alternative to the tuple form)
+
+ Returns
+ -------
+ out : ndarray
+ View of `a`, with axes suitably permuted.
+
+ See Also
+ --------
+ ndarray.T : Array property returning the array transposed.
+
+ Examples
+ --------
+ >>> a = np.array([[1, 2], [3, 4]])
+ >>> a
+ array([[1, 2],
+ [3, 4]])
+ >>> a.transpose()
+ array([[1, 3],
+ [2, 4]])
+ >>> a.transpose((1, 0))
+ array([[1, 3],
+ [2, 4]])
+ >>> a.transpose(1, 0)
+ array([[1, 3],
+ [2, 4]])
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('var',
+ """
+ a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False)
+
+ Returns the variance of the array elements, along given axis.
+
+ Refer to `numpy.var` for full documentation.
+
+ See Also
+ --------
+ numpy.var : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('view',
+ """
+ a.view(dtype=None, type=None)
+
+ New view of array with the same data.
+
+ Parameters
+ ----------
+ dtype : data-type or ndarray sub-class, optional
+ Data-type descriptor of the returned view, e.g., float32 or int16. The
+ default, None, results in the view having the same data-type as `a`.
+ This argument can also be specified as an ndarray sub-class, which
+ then specifies the type of the returned object (this is equivalent to
+ setting the ``type`` parameter).
+ type : Python type, optional
+ Type of the returned view, e.g., ndarray or matrix. Again, the
+ default None results in type preservation.
+
+ Notes
+ -----
+ ``a.view()`` is used two different ways:
+
+ ``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view
+ of the array's memory with a different data-type. This can cause a
+ reinterpretation of the bytes of memory.
+
+ ``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just
+ returns an instance of `ndarray_subclass` that looks at the same array
+ (same shape, dtype, etc.) This does not cause a reinterpretation of the
+ memory.
+
+ For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of
+ bytes per entry than the previous dtype (for example, converting a
+ regular array to a structured array), then the behavior of the view
+ cannot be predicted just from the superficial appearance of ``a`` (shown
+ by ``print(a)``). It also depends on exactly how ``a`` is stored in
+ memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus
+ defined as a slice or transpose, etc., the view may give different
+ results.
+
+
+ Examples
+ --------
+ >>> x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)])
+
+ Viewing array data using a different type and dtype:
+
+ >>> y = x.view(dtype=np.int16, type=np.matrix)
+ >>> y
+ matrix([[513]], dtype=int16)
+ >>> print(type(y))
+
+
+ Creating a view on a structured array so it can be used in calculations
+
+ >>> x = np.array([(1, 2),(3,4)], dtype=[('a', np.int8), ('b', np.int8)])
+ >>> xv = x.view(dtype=np.int8).reshape(-1,2)
+ >>> xv
+ array([[1, 2],
+ [3, 4]], dtype=int8)
+ >>> xv.mean(0)
+ array([ 2., 3.])
+
+ Making changes to the view changes the underlying array
+
+ >>> xv[0,1] = 20
+ >>> print(x)
+ [(1, 20) (3, 4)]
+
+ Using a view to convert an array to a recarray:
+
+ >>> z = x.view(np.recarray)
+ >>> z.a
+ array([1], dtype=int8)
+
+ Views share data:
+
+ >>> x[0] = (9, 10)
+ >>> z[0]
+ (9, 10)
+
+ Views that change the dtype size (bytes per entry) should normally be
+ avoided on arrays defined by slices, transposes, fortran-ordering, etc.:
+
+ >>> x = np.array([[1,2,3],[4,5,6]], dtype=np.int16)
+ >>> y = x[:, 0:2]
+ >>> y
+ array([[1, 2],
+ [4, 5]], dtype=int16)
+ >>> y.view(dtype=[('width', np.int16), ('length', np.int16)])
+ Traceback (most recent call last):
+ File "", line 1, in
+ ValueError: new type not compatible with array.
+ >>> z = y.copy()
+ >>> z.view(dtype=[('width', np.int16), ('length', np.int16)])
+ array([[(1, 2)],
+ [(4, 5)]], dtype=[('width', '<i2'), ('length', '<i2')])
+
+ """))
+
+
+ ##############################################################################
+ #
+ # umath functions
+ #
+ ##############################################################################
+
+ add_newdoc('numpy.core.umath', 'frompyfunc',
+ """
+ frompyfunc(func, nin, nout)
+
+ Takes an arbitrary Python function and returns a NumPy ufunc.
+
+ Can be used, for example, to add broadcasting to a built-in Python
+ function (see Examples section).
+
+ Parameters
+ ----------
+ func : Python function object
+ An arbitrary Python function.
+ nin : int
+ The number of input arguments.
+ nout : int
+ The number of objects returned by `func`.
+
+ Returns
+ -------
+ out : ufunc
+ Returns a NumPy universal function (``ufunc``) object.
+
+ See Also
+ --------
+ vectorize : evaluates pyfunc over input arrays using broadcasting rules of numpy
+
+ Notes
+ -----
+ The returned ufunc always returns PyObject arrays.
+
+ Examples
+ --------
+ Use frompyfunc to add broadcasting to the Python function ``oct``:
+
+ >>> oct_array = np.frompyfunc(oct, 1, 1)
+ >>> oct_array(np.array((10, 30, 100)))
+ array([012, 036, 0144], dtype=object)
+ >>> np.array((oct(10), oct(30), oct(100))) # for comparison
+ array(['012', '036', '0144'],
+ dtype='|S4')
+
+ """)
+
+add_newdoc('numpy.core.umath', 'geterrobj',
+ """
+ geterrobj()
+
+ Return the current object that defines floating-point error handling.
+
+ The error object contains all information that defines the error handling
+ behavior in NumPy. `geterrobj` is used internally by the other
+ functions that get and set error handling behavior (`geterr`, `seterr`,
+ `geterrcall`, `seterrcall`).
+
+ Returns
+ -------
+ errobj : list
+ The error object, a list containing three elements:
+ [internal numpy buffer size, error mask, error callback function].
+
+ The error mask is a single integer that holds the treatment information
+ on all four floating point errors. The information for each error type
+ is contained in three bits of the integer. If we print it in base 8, we
+ can see what treatment is set for "invalid", "under", "over", and
+ "divide" (in that order). The printed string can be interpreted with
+
+ * 0 : 'ignore'
+ * 1 : 'warn'
+ * 2 : 'raise'
+ * 3 : 'call'
+ * 4 : 'print'
+ * 5 : 'log'
+
+ See Also
+ --------
+ seterrobj, seterr, geterr, seterrcall, geterrcall
+ getbufsize, setbufsize
+
+ Notes
+ -----
+ For complete documentation of the types of floating-point exceptions and
+ treatment options, see `seterr`.
+
+ Examples
+ --------
+ >>> np.geterrobj() # first get the defaults
+ [10000, 0, None]
+
+ >>> def err_handler(type, flag):
+ ... print("Floating point error (%s), with flag %s" % (type, flag))
+ ...
+ >>> old_bufsize = np.setbufsize(20000)
+ >>> old_err = np.seterr(divide='raise')
+ >>> old_handler = np.seterrcall(err_handler)
+ >>> np.geterrobj()
+ [20000, 2, <function err_handler at 0x...>]
+
+ >>> old_err = np.seterr(all='ignore')
+ >>> np.base_repr(np.geterrobj()[1], 8)
+ '0'
+ >>> old_err = np.seterr(divide='warn', over='log', under='call',
+ ... invalid='print')
+ >>> np.base_repr(np.geterrobj()[1], 8)
+ '4351'
+
+ """)
+
+add_newdoc('numpy.core.umath', 'seterrobj',
+ """
+ seterrobj(errobj)
+
+ Set the object that defines floating-point error handling.
+
+ The error object contains all information that defines the error handling
+ behavior in NumPy. `seterrobj` is used internally by the other
+ functions that set error handling behavior (`seterr`, `seterrcall`).
+
+ Parameters
+ ----------
+ errobj : list
+ The error object, a list containing three elements:
+ [internal numpy buffer size, error mask, error callback function].
+
+ The error mask is a single integer that holds the treatment information
+ on all four floating point errors. The information for each error type
+ is contained in three bits of the integer. If we print it in base 8, we
+ can see what treatment is set for "invalid", "under", "over", and
+ "divide" (in that order). The printed string can be interpreted with
+
+ * 0 : 'ignore'
+ * 1 : 'warn'
+ * 2 : 'raise'
+ * 3 : 'call'
+ * 4 : 'print'
+ * 5 : 'log'
+
+ See Also
+ --------
+ geterrobj, seterr, geterr, seterrcall, geterrcall
+ getbufsize, setbufsize
+
+ Notes
+ -----
+ For complete documentation of the types of floating-point exceptions and
+ treatment options, see `seterr`.
+
+ Examples
+ --------
+ >>> old_errobj = np.geterrobj() # first get the defaults
+ >>> old_errobj
+ [10000, 0, None]
+
+ >>> def err_handler(type, flag):
+ ... print("Floating point error (%s), with flag %s" % (type, flag))
+ ...
+ >>> new_errobj = [20000, 12, err_handler]
+ >>> np.seterrobj(new_errobj)
+ >>> np.base_repr(12, 8) # int for divide=4 ('print') and over=1 ('warn')
+ '14'
+ >>> np.geterr()
+ {'over': 'warn', 'divide': 'print', 'invalid': 'ignore', 'under': 'ignore'}
+ >>> np.geterrcall() is err_handler
+ True
+
+ """)
+
+
+##############################################################################
+#
+# compiled_base functions
+#
+##############################################################################
+
+add_newdoc('numpy.core.multiarray', 'add_docstring',
+ """
+ add_docstring(obj, docstring)
+
+ Add a docstring to a built-in obj if possible.
+ If the obj already has a docstring, a RuntimeError is raised.
+ If this routine does not know how to add a docstring to the object,
+ a TypeError is raised.
+ """)
+
+add_newdoc('numpy.core.umath', '_add_newdoc_ufunc',
+ """
+ add_ufunc_docstring(ufunc, new_docstring)
+
+ Replace the docstring for a ufunc with new_docstring.
+ This method will only work if the current docstring for
+ the ufunc is NULL. (At the C level, i.e. when ufunc->doc is NULL.)
+
+ Parameters
+ ----------
+ ufunc : numpy.ufunc
+ A ufunc whose current doc is NULL.
+ new_docstring : string
+ The new docstring for the ufunc.
+
+ Notes
+ -----
+ This method allocates memory for new_docstring on
+ the heap. Technically this creates a memory leak, since this
+ memory will not be reclaimed until the end of the program
+ even if the ufunc itself is removed. However, this will only
+ be a problem if the user is repeatedly creating ufuncs with
+ no documentation, adding documentation via add_newdoc_ufunc,
+ and then throwing away the ufunc.
+ """)
+
+add_newdoc('numpy.core.multiarray', 'packbits',
+ """
+ packbits(myarray, axis=None)
+
+ Packs the elements of a binary-valued array into bits in a uint8 array.
+
+ The result is padded to full bytes by inserting zero bits at the end.
+
+ Parameters
+ ----------
+ myarray : array_like
+ An array of integers or booleans whose elements should be packed to
+ bits.
+ axis : int, optional
+ The dimension over which bit-packing is done.
+ ``None`` implies packing the flattened array.
+
+ Returns
+ -------
+ packed : ndarray
+ Array of type uint8 whose elements represent bits corresponding to the
+ logical (0 or nonzero) value of the input elements. The shape of
+ `packed` has the same number of dimensions as the input (unless `axis`
+ is None, in which case the output is 1-D).
+
+ See Also
+ --------
+ unpackbits: Unpacks elements of a uint8 array into a binary-valued output
+ array.
+
+ Examples
+ --------
+ >>> a = np.array([[[1,0,1],
+ ... [0,1,0]],
+ ... [[1,1,0],
+ ... [0,0,1]]])
+ >>> b = np.packbits(a, axis=-1)
+ >>> b
+ array([[[160],[64]],[[192],[32]]], dtype=uint8)
+
+ Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000,
+ and 32 = 0010 0000.
+
+ """)
+
+add_newdoc('numpy.core.multiarray', 'unpackbits',
+ """
+ unpackbits(myarray, axis=None)
+
+ Unpacks elements of a uint8 array into a binary-valued output array.
+
+ Each element of `myarray` represents a bit-field that should be unpacked
+ into a binary-valued output array. The shape of the output array is either
+ 1-D (if `axis` is None) or the same shape as the input array with unpacking
+ done along the axis specified.
+
+ Parameters
+ ----------
+ myarray : ndarray, uint8 type
+ Input array.
+ axis : int, optional
+ The dimension over which bit-unpacking is done.
+ ``None`` implies unpacking the flattened array.
+
+ Returns
+ -------
+ unpacked : ndarray, uint8 type
+ The elements are binary-valued (0 or 1).
+
+ See Also
+ --------
+ packbits : Packs the elements of a binary-valued array into bits in a uint8
+ array.
+
+ Examples
+ --------
+ >>> a = np.array([[2], [7], [23]], dtype=np.uint8)
+ >>> a
+ array([[ 2],
+ [ 7],
+ [23]], dtype=uint8)
+ >>> b = np.unpackbits(a, axis=1)
+ >>> b
+ array([[0, 0, 0, 0, 0, 0, 1, 0],
+ [0, 0, 0, 0, 0, 1, 1, 1],
+ [0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8)
+
+ """)
+
+add_newdoc('numpy.core._multiarray_tests', 'format_float_OSprintf_g',
+ """
+ format_float_OSprintf_g(val, precision)
+
+ Print a floating point scalar using the system's printf function,
+ equivalent to:
+
+ printf("%.*g", precision, val);
+
+ for half/float/double, or replacing 'g' by 'Lg' for longdouble. This
+ method is designed to help cross-validate the format_float_* methods.
+
+ Parameters
+ ----------
+ val : python float or numpy floating scalar
+ Value to format.
+
+ precision : non-negative integer, optional
+ Precision given to printf.
+
+ Returns
+ -------
+ rep : string
+ The string representation of the floating point value
+
+ See Also
+ --------
+ format_float_scientific
+ format_float_positional
+ """)
+
+
+##############################################################################
+#
+# Documentation for ufunc attributes and methods
+#
+##############################################################################
+
+
+##############################################################################
+#
+# ufunc object
+#
+##############################################################################
+
+add_newdoc('numpy.core', 'ufunc',
+ """
+ Functions that operate element by element on whole arrays.
+
+ To see the documentation for a specific ufunc, use `info`. For
+ example, ``np.info(np.sin)``. Because ufuncs are written in C
+ (for speed) and linked into Python with NumPy's ufunc facility,
+ Python's help() function finds this page whenever help() is called
+ on a ufunc.
+
+ A detailed explanation of ufuncs can be found in the docs for :ref:`ufuncs`.
+
+ Calling ufuncs:
+ ===============
+
+ op(*x[, out], where=True, **kwargs)
+ Apply `op` to the arguments `*x` elementwise, broadcasting the arguments.
+
+ The broadcasting rules are:
+
+ * Dimensions of length 1 may be prepended to either array.
+ * Arrays may be repeated along dimensions of length 1.
+
+ Parameters
+ ----------
+ *x : array_like
+ Input arrays.
+ out : ndarray, None, or tuple of ndarray and None, optional
+ Alternate array object(s) in which to put the result; if provided, it
+ must have a shape that the inputs broadcast to. A tuple of arrays
+ (possible only as a keyword argument) must have length equal to the
+ number of outputs; use `None` for uninitialized outputs to be
+ allocated by the ufunc.
+ where : array_like, optional
+ Values of True indicate to calculate the ufunc at that position, values
+ of False indicate to leave the value in the output alone. Note that if
+ an uninitialized return array is created via the default ``out=None``,
+ then the elements where the values are False will remain uninitialized.
+ **kwargs
+ For other keyword-only arguments, see the :ref:`ufunc docs <ufuncs.kwargs>`.
+
+ Returns
+ -------
+ r : ndarray or tuple of ndarray
+ `r` will have the shape that the arrays in `x` broadcast to; if `out` is
+ provided, it will be returned. If not, `r` will be allocated and
+ may contain uninitialized values. If the function has more than one
+ output, then the result will be a tuple of arrays.
+
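+ Examples
+ --------
+ An illustrative call combining `out` and `where`; the masked-out element
+ keeps the value already present in `out`:
+
+ >>> x = np.arange(3)
+ >>> out = np.zeros(3)
+ >>> np.add(x, 10, out=out, where=x > 0)
+ array([ 0., 11., 12.])
+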
+ """)
+
+
+##############################################################################
+#
+# ufunc attributes
+#
+##############################################################################
+
+add_newdoc('numpy.core', 'ufunc', ('identity',
+ """
+ The identity value.
+
+ Data attribute containing the identity element for the ufunc, if it has one.
+ If it does not, the attribute value is None.
+
+ Examples
+ --------
+ >>> np.add.identity
+ 0
+ >>> np.multiply.identity
+ 1
+ >>> np.power.identity
+ 1
+ >>> print(np.exp.identity)
+ None
+ """))
+
+add_newdoc('numpy.core', 'ufunc', ('nargs',
+ """
+ The number of arguments.
+
+ Data attribute containing the number of arguments the ufunc takes, including
+ optional ones.
+
+ Notes
+ -----
+ Typically this value will be one more than what you might expect because all
+ ufuncs take the optional "out" argument.
+
+ Examples
+ --------
+ >>> np.add.nargs
+ 3
+ >>> np.multiply.nargs
+ 3
+ >>> np.power.nargs
+ 3
+ >>> np.exp.nargs
+ 2
+ """))
+
+add_newdoc('numpy.core', 'ufunc', ('nin',
+ """
+ The number of inputs.
+
+ Data attribute containing the number of arguments the ufunc treats as input.
+
+ Examples
+ --------
+ >>> np.add.nin
+ 2
+ >>> np.multiply.nin
+ 2
+ >>> np.power.nin
+ 2
+ >>> np.exp.nin
+ 1
+ """))
+
+add_newdoc('numpy.core', 'ufunc', ('nout',
+ """
+ The number of outputs.
+
+ Data attribute containing the number of arguments the ufunc treats as output.
+
+ Notes
+ -----
+ Since all ufuncs can take output arguments, this will always be (at least) 1.
+
+ Examples
+ --------
+ >>> np.add.nout
+ 1
+ >>> np.multiply.nout
+ 1
+ >>> np.power.nout
+ 1
+ >>> np.exp.nout
+ 1
+
+ """))
+
+add_newdoc('numpy.core', 'ufunc', ('ntypes',
+ """
+ The number of types.
+
+ The number of numerical NumPy types - of which there are 18 total - on which
+ the ufunc can operate.
+
+ See Also
+ --------
+ numpy.ufunc.types
+
+ Examples
+ --------
+ >>> np.add.ntypes
+ 18
+ >>> np.multiply.ntypes
+ 18
+ >>> np.power.ntypes
+ 17
+ >>> np.exp.ntypes
+ 7
+ >>> np.remainder.ntypes
+ 14
+
+ """))
+
+add_newdoc('numpy.core', 'ufunc', ('types',
+ """
+ Returns a list with types grouped input->output.
+
+ Data attribute listing the data-type "Domain-Range" groupings the ufunc can
+ deliver. The data-types are given using the character codes.
+
+ See Also
+ --------
+ numpy.ufunc.ntypes
+
+ Examples
+ --------
+ >>> np.add.types
+ ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
+ 'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D',
+ 'GG->G', 'OO->O']
+
+ >>> np.multiply.types
+ ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
+ 'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D',
+ 'GG->G', 'OO->O']
+
+ >>> np.power.types
+ ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
+ 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G',
+ 'OO->O']
+
+ >>> np.exp.types
+ ['f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O']
+
+ >>> np.remainder.types
+ ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
+ 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'OO->O']
+
+ """))
+
+add_newdoc('numpy.core', 'ufunc', ('signature',
+ """
+ Definition of the core elements a generalized ufunc operates on.
+
+ The signature determines how the dimensions of each input/output array
+ are split into core and loop dimensions:
+
+ 1. Each dimension in the signature is matched to a dimension of the
+ corresponding passed-in array, starting from the end of the shape tuple.
+ 2. Core dimensions assigned to the same label in the signature must have
+ exactly matching sizes, no broadcasting is performed.
+ 3. The core dimensions are removed from all inputs and the remaining
+ dimensions are broadcast together, defining the loop dimensions.
+
+ Notes
+ -----
+ Generalized ufuncs are used internally in many linalg functions, and in
+ the testing suite; the examples below are taken from these.
+ For ufuncs that operate on scalars, the signature is `None`, which is
+ equivalent to '()' for every argument.
+
+ Examples
+ --------
+ >>> np.core.umath_tests.matrix_multiply.signature
+ '(m,n),(n,p)->(m,p)'
+ >>> np.linalg._umath_linalg.det.signature
+ '(m,m)->()'
+ >>> np.add.signature is None
+ True # equivalent to '(),()->()'
+ """))
+
+##############################################################################
+#
+# ufunc methods
+#
+##############################################################################
+
+add_newdoc('numpy.core', 'ufunc', ('reduce',
+ """
+ reduce(a, axis=0, dtype=None, out=None, keepdims=False, initial)
+
+ Reduces `a`'s dimension by one, by applying ufunc along one axis.
+
+ Let :math:`a.shape = (N_0, ..., N_i, ..., N_{M-1})`. Then
+ :math:`ufunc.reduce(a, axis=i)[k_0, ..,k_{i-1}, k_{i+1}, .., k_{M-1}]` =
+ the result of iterating `j` over :math:`range(N_i)`, cumulatively applying
+ ufunc to each :math:`a[k_0, ..,k_{i-1}, j, k_{i+1}, .., k_{M-1}]`.
+ For a one-dimensional array, reduce produces results equivalent to:
+ ::
+
+ r = op.identity # op = ufunc
+ for i in range(len(A)):
+ r = op(r, A[i])
+ return r
+
+ For example, add.reduce() is equivalent to sum().
+
+ Parameters
+ ----------
+ a : array_like
+ The array to act on.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which a reduction is performed.
+ The default (`axis` = 0) is to perform a reduction over the first
+ dimension of the input array. `axis` may be negative, in
+ which case it counts from the last to the first axis.
+
+ .. versionadded:: 1.7.0
+
+ If this is `None`, a reduction is performed over all the axes.
+ If this is a tuple of ints, a reduction is performed on multiple
+ axes, instead of a single axis or all the axes as before.
+
+ For operations which are either not commutative or not associative,
+ doing a reduction over multiple axes is not well-defined. The
+ ufuncs do not currently raise an exception in this case, but will
+ likely do so in the future.
+ dtype : data-type code, optional
+ The type used to represent the intermediate results. Defaults
+ to the data-type of the output array if this is provided, or
+ the data-type of the input array if no output array is provided.
+ out : ndarray, None, or tuple of ndarray and None, optional
+ A location into which the result is stored. If not provided or `None`,
+ a freshly-allocated array is returned. For consistency with
+ :ref:`ufunc.__call__`, if given as a keyword, this may be wrapped in a
+ 1-element tuple.
+
+ .. versionchanged:: 1.13.0
+ Tuples are allowed for keyword argument.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the original `arr`.
+
+ .. versionadded:: 1.7.0
+ initial : scalar, optional
+ The value with which to start the reduction.
+ If the ufunc has no identity or the dtype is object, this defaults
+ to None - otherwise it defaults to ufunc.identity.
+ If ``None`` is given, the first element of the reduction is used,
+ and an error is thrown if the reduction is empty.
+
+ .. versionadded:: 1.15.0
+
+ Returns
+ -------
+ r : ndarray
+ The reduced array. If `out` was supplied, `r` is a reference to it.
+
+ Examples
+ --------
+ >>> np.multiply.reduce([2,3,5])
+ 30
+
+ A multi-dimensional array example:
+
+ >>> X = np.arange(8).reshape((2,2,2))
+ >>> X
+ array([[[0, 1],
+ [2, 3]],
+ [[4, 5],
+ [6, 7]]])
+ >>> np.add.reduce(X, 0)
+ array([[ 4, 6],
+ [ 8, 10]])
+ >>> np.add.reduce(X) # confirm: default axis value is 0
+ array([[ 4, 6],
+ [ 8, 10]])
+ >>> np.add.reduce(X, 1)
+ array([[ 2, 4],
+ [10, 12]])
+ >>> np.add.reduce(X, 2)
+ array([[ 1, 5],
+ [ 9, 13]])
+
+ You can use the ``initial`` keyword argument to initialize the reduction with a
+ different value.
+
+ >>> np.add.reduce([10], initial=5)
+ 15
+ >>> np.add.reduce(np.ones((2, 2, 2)), axis=(0, 2), initial=10)
+ array([14., 14.])
+
+ Allows reductions of empty arrays where they would normally fail, i.e.
+ for ufuncs without an identity.
+
+ >>> np.minimum.reduce([], initial=np.inf)
+ inf
+ >>> np.minimum.reduce([])
+ Traceback (most recent call last):
+ ...
+ ValueError: zero-size array to reduction operation minimum which has no identity
+ """))
+
+add_newdoc('numpy.core', 'ufunc', ('accumulate',
+ """
+ accumulate(array, axis=0, dtype=None, out=None)
+
+ Accumulate the result of applying the operator to all elements.
+
+ For a one-dimensional array, accumulate produces results equivalent to::
+
+ r = np.empty(len(A))
+ t = op.identity # op = the ufunc being applied to A's elements
+ for i in range(len(A)):
+ t = op(t, A[i])
+ r[i] = t
+ return r
+
+ For example, add.accumulate() is equivalent to np.cumsum().
+
+ For a multi-dimensional array, accumulate is applied along only one
+ axis (axis zero by default; see Examples below) so repeated use is
+ necessary if one wants to accumulate over multiple axes.
+
+ Parameters
+ ----------
+ array : array_like
+ The array to act on.
+ axis : int, optional
+ The axis along which to apply the accumulation; default is zero.
+ dtype : data-type code, optional
+ The data-type used to represent the intermediate results. Defaults
+ to the data-type of the output array if such is provided, or the
+ data-type of the input array if no output array is provided.
+ out : ndarray, None, or tuple of ndarray and None, optional
+ A location into which the result is stored. If not provided or `None`,
+ a freshly-allocated array is returned. For consistency with
+ :ref:`ufunc.__call__`, if given as a keyword, this may be wrapped in a
+ 1-element tuple.
+
+ .. versionchanged:: 1.13.0
+ Tuples are allowed for keyword argument.
+
+ Returns
+ -------
+ r : ndarray
+ The accumulated values. If `out` was supplied, `r` is a reference to
+ `out`.
+
+ Examples
+ --------
+ 1-D array examples:
+
+ >>> np.add.accumulate([2, 3, 5])
+ array([ 2, 5, 10])
+ >>> np.multiply.accumulate([2, 3, 5])
+ array([ 2, 6, 30])
+
+ 2-D array examples:
+
+ >>> I = np.eye(2)
+ >>> I
+ array([[ 1., 0.],
+ [ 0., 1.]])
+
+ Accumulate along axis 0 (rows), down columns:
+
+ >>> np.add.accumulate(I, 0)
+ array([[ 1., 0.],
+ [ 1., 1.]])
+ >>> np.add.accumulate(I) # no axis specified = axis zero
+ array([[ 1., 0.],
+ [ 1., 1.]])
+
+ Accumulate along axis 1 (columns), through rows:
+
+ >>> np.add.accumulate(I, 1)
+ array([[ 1., 1.],
+ [ 0., 1.]])
+
+ """))
+
+add_newdoc('numpy.core', 'ufunc', ('reduceat',
+ """
+ reduceat(a, indices, axis=0, dtype=None, out=None)
+
+ Performs a (local) reduce with specified slices over a single axis.
+
+ For i in ``range(len(indices))``, `reduceat` computes
+ ``ufunc.reduce(a[indices[i]:indices[i+1]])``, which becomes the i-th
+ generalized "row" parallel to `axis` in the final result (i.e., in a
+ 2-D array, for example, if `axis = 0`, it becomes the i-th row, but if
+ `axis = 1`, it becomes the i-th column). There are three exceptions to this:
+
+ * when ``i = len(indices) - 1`` (so for the last index),
+ ``indices[i+1] = a.shape[axis]``.
+ * if ``indices[i] >= indices[i + 1]``, the i-th generalized "row" is
+ simply ``a[indices[i]]``.
+ * if ``indices[i] >= len(a)`` or ``indices[i] < 0``, an error is raised.
+
+ The shape of the output depends on the size of `indices`, and may be
+ larger than `a` (this happens if ``len(indices) > a.shape[axis]``).
+
+ Parameters
+ ----------
+ a : array_like
+ The array to act on.
+ indices : array_like
+ Paired indices, comma separated (not colon), specifying slices to
+ reduce.
+ axis : int, optional
+ The axis along which to apply the reduceat.
+ dtype : data-type code, optional
+ The type used to represent the intermediate results. Defaults
+ to the data type of the output array if this is provided, or
+ the data type of the input array if no output array is provided.
+ out : ndarray, None, or tuple of ndarray and None, optional
+ A location into which the result is stored. If not provided or `None`,
+ a freshly-allocated array is returned. For consistency with
+ :ref:`ufunc.__call__`, if given as a keyword, this may be wrapped in a
+ 1-element tuple.
+
+ .. versionchanged:: 1.13.0
+ Tuples are allowed for keyword argument.
+
+ Returns
+ -------
+ r : ndarray
+ The reduced values. If `out` was supplied, `r` is a reference to
+ `out`.
+
+ Notes
+ -----
+ A descriptive example:
+
+ If `a` is 1-D, the function `ufunc.accumulate(a)` is the same as
+ ``ufunc.reduceat(a, indices)[::2]`` where `indices` is
+ ``range(len(a) - 1)`` with a zero placed
+ in every other element:
+ ``indices = zeros(2 * len(a) - 1)``, ``indices[1::2] = range(1, len(a))``.
+
+ Don't be fooled by this attribute's name: `reduceat(a)` is not
+ necessarily smaller than `a`.
+
+ Examples
+ --------
+ To take the running sum of four successive values:
+
+ >>> np.add.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2]
+ array([ 6, 10, 14, 18])
+
+ A 2-D example:
+
+ >>> x = np.linspace(0, 15, 16).reshape(4,4)
+ >>> x
+ array([[ 0., 1., 2., 3.],
+ [ 4., 5., 6., 7.],
+ [ 8., 9., 10., 11.],
+ [ 12., 13., 14., 15.]])
+
+ ::
+
+ # reduce such that the result has the following five rows:
+ # [row1 + row2 + row3]
+ # [row4]
+ # [row2]
+ # [row3]
+ # [row1 + row2 + row3 + row4]
+
+ >>> np.add.reduceat(x, [0, 3, 1, 2, 0])
+ array([[ 12., 15., 18., 21.],
+ [ 12., 13., 14., 15.],
+ [ 4., 5., 6., 7.],
+ [ 8., 9., 10., 11.],
+ [ 24., 28., 32., 36.]])
+
+ ::
+
+ # reduce such that result has the following two columns:
+ # [col1 * col2 * col3, col4]
+
+ >>> np.multiply.reduceat(x, [0, 3], 1)
+ array([[ 0., 3.],
+ [ 120., 7.],
+ [ 720., 11.],
+ [ 2184., 15.]])
+
+ """))
+
+add_newdoc('numpy.core', 'ufunc', ('outer',
+ """
+ outer(A, B, **kwargs)
+
+ Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`.
+
+ Let ``M = A.ndim``, ``N = B.ndim``. Then the result, `C`, of
+ ``op.outer(A, B)`` is an array of dimension M + N such that:
+
+ .. math:: C[i_0, ..., i_{M-1}, j_0, ..., j_{N-1}] =
+ op(A[i_0, ..., i_{M-1}], B[j_0, ..., j_{N-1}])
+
+ For `A` and `B` one-dimensional, this is equivalent to::
+
+ r = empty((len(A), len(B)))
+ for i in range(len(A)):
+ for j in range(len(B)):
+ r[i,j] = op(A[i], B[j]) # op = ufunc in question
+
+ Parameters
+ ----------
+ A : array_like
+ First array
+ B : array_like
+ Second array
+ kwargs : any
+ Arguments to pass on to the ufunc. Typically `dtype` or `out`.
+
+ Returns
+ -------
+ r : ndarray
+ Output array
+
+ See Also
+ --------
+ numpy.outer
+
+ Examples
+ --------
+ >>> np.multiply.outer([1, 2, 3], [4, 5, 6])
+ array([[ 4, 5, 6],
+ [ 8, 10, 12],
+ [12, 15, 18]])
+
+ A multi-dimensional example:
+
+ >>> A = np.array([[1, 2, 3], [4, 5, 6]])
+ >>> A.shape
+ (2, 3)
+ >>> B = np.array([[1, 2, 3, 4]])
+ >>> B.shape
+ (1, 4)
+ >>> C = np.multiply.outer(A, B)
+ >>> C.shape; C
+ (2, 3, 1, 4)
+ array([[[[ 1, 2, 3, 4]],
+ [[ 2, 4, 6, 8]],
+ [[ 3, 6, 9, 12]]],
+ [[[ 4, 8, 12, 16]],
+ [[ 5, 10, 15, 20]],
+ [[ 6, 12, 18, 24]]]])
+
+ """))
+
+add_newdoc('numpy.core', 'ufunc', ('at',
+ """
+ at(a, indices, b=None)
+
+ Performs an unbuffered in-place operation on operand 'a' for elements
+ specified by 'indices'. For the addition ufunc, this method is equivalent to
+ ``a[indices] += b``, except that results are accumulated for elements that
+ are indexed more than once. For example, ``a[[0,0]] += 1`` will only
+ increment the first element once because of buffering, whereas
+ ``add.at(a, [0,0], 1)`` will increment the first element twice.
+
+ .. versionadded:: 1.8.0
+
+ Parameters
+ ----------
+ a : array_like
+ The array to perform the in-place operation on.
+ indices : array_like or tuple
+ Array like index object or slice object for indexing into first
+ operand. If first operand has multiple dimensions, indices can be a
+ tuple of array like index objects or slice objects.
+ b : array_like
+ Second operand for ufuncs requiring two operands. Operand must be
+ broadcastable over first operand after indexing or slicing.
+
+ Examples
+ --------
+ Set items 0 and 1 to their negative values:
+
+ >>> a = np.array([1, 2, 3, 4])
+ >>> np.negative.at(a, [0, 1])
+ >>> a
+ array([-1, -2, 3, 4])
+
+ Increment items 0 and 1, and increment item 2 twice:
+
+ >>> a = np.array([1, 2, 3, 4])
+ >>> np.add.at(a, [0, 1, 2, 2], 1)
+ >>> a
+ array([2, 3, 5, 4])
+
+ Add items 0 and 1 in first array to second array,
+ and store results in first array:
+
+ >>> a = np.array([1, 2, 3, 4])
+ >>> b = np.array([1, 2])
+ >>> np.add.at(a, [0, 1], b)
+ >>> a
+ array([2, 4, 3, 4])
+
+ """))
+
+##############################################################################
+#
+# Documentation for dtype attributes and methods
+#
+##############################################################################
+
+##############################################################################
+#
+# dtype object
+#
+##############################################################################
+
+add_newdoc('numpy.core.multiarray', 'dtype',
+ """
+ dtype(obj, align=False, copy=False)
+
+ Create a data type object.
+
+ A numpy array is homogeneous, and contains elements described by a
+ dtype object. A dtype object can be constructed from different
+ combinations of fundamental numeric types.
+
+ Parameters
+ ----------
+ obj
+ Object to be converted to a data type object.
+ align : bool, optional
+ Add padding to the fields to match what a C compiler would output
+ for a similar C-struct. Can be ``True`` only if `obj` is a dictionary
+ or a comma-separated string. If a struct dtype is being created,
+ this also sets a sticky alignment flag ``isalignedstruct``.
+ copy : bool, optional
+ Make a new copy of the data-type object. If ``False``, the result
+ may just be a reference to a built-in data-type object.
+
+ See also
+ --------
+ result_type
+
+ Examples
+ --------
+ Using array-scalar type:
+
+ >>> np.dtype(np.int16)
+ dtype('int16')
+
+ Structured type, one field name 'f1', containing int16:
+
+ >>> np.dtype([('f1', np.int16)])
+ dtype([('f1', '<i2')])
+
+ Structured type, one field named 'f1', in itself containing a structured
+ type with one field:
+
+ >>> np.dtype([('f1', [('f1', np.int16)])])
+ dtype([('f1', [('f1', '<i2')])])
+
+ Structured type, two fields: the first field contains an unsigned int, the
+ second an int32:
+
+ >>> np.dtype([('f1', np.uint), ('f2', np.int32)])
+ dtype([('f1', '<u4'), ('f2', '<i4')])
+
+ Using array-protocol type strings:
+
+ >>> np.dtype([('a','f8'),('b','S10')])
+ dtype([('a', '<f8'), ('b', '|S10')])
+
+ Using comma-separated field formats. The shape is (2,3):
+
+ >>> np.dtype("i4, (2,3)f8")
+ dtype([('f0', '<i4'), ('f1', '<f8', (2, 3))])
+
+ Using tuples. ``int`` is a fixed type, 3 the field's shape. ``void``
+ is a flexible type, here of size 10:
+
+ >>> np.dtype([('hello',(int,3)),('world',np.void,10)])
+ dtype([('hello', '<i4', (3,)), ('world', '|V10')])
+
+ Subdivide ``int16`` into 2 ``int8``'s, called x and y. 0 and 1 are
+ the offsets in bytes:
+
+ >>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)}))
+ dtype(('<i2', [('x', '|i1'), ('y', '|i1')]))
+
+ Using dictionaries. Two fields named 'gender' and 'age':
+
+ >>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]})
+ dtype([('gender', '|S1'), ('age', '|u1')])
+
+ Offsets in bytes, here 0 and 25:
+
+ >>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)})
+ dtype([('surname', '|S25'), ('age', '|u1')])
+
+ """)
+
+##############################################################################
+#
+# dtype attributes
+#
+##############################################################################
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('alignment',
+ """
+ The required alignment (bytes) of this data-type according to the compiler.
+
+ More information is available in the C-API section of the manual.
+
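+ Examples
+ --------
+ Illustrative values; alignment is compiler- and platform-dependent
+ (these are typical on x86-64):
+
+ >>> np.dtype('u1').alignment
+ 1
+ >>> np.dtype(np.float64).alignment
+ 8
+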
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('byteorder',
+ """
+ A character indicating the byte-order of this data-type object.
+
+ One of:
+
+ === ==============
+ '=' native
+ '<' little-endian
+ '>' big-endian
+ '|' not applicable
+ === ==============
+
+ All built-in data-type objects have byteorder either '=' or '|'.
+
+ Examples
+ --------
+
+ >>> dt = np.dtype('i2')
+ >>> dt.byteorder
+ '='
+ >>> # endian is not relevant for 8 bit numbers
+ >>> np.dtype('i1').byteorder
+ '|'
+ >>> # or ASCII strings
+ >>> np.dtype('S2').byteorder
+ '|'
+ >>> # Even if specific code is given, and it is native
+ >>> # '=' is the byteorder
+ >>> import sys
+ >>> sys_is_le = sys.byteorder == 'little'
+ >>> native_code = sys_is_le and '<' or '>'
+ >>> swapped_code = sys_is_le and '>' or '<'
+ >>> dt = np.dtype(native_code + 'i2')
+ >>> dt.byteorder
+ '='
+ >>> # Swapped code shows up as itself
+ >>> dt = np.dtype(swapped_code + 'i2')
+ >>> dt.byteorder == swapped_code
+ True
+
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('char',
+ """A unique character code for each of the 21 different built-in types."""))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('descr',
+ """
+ `__array_interface__` description of the data-type.
+
+ The format is that required by the 'descr' key in the
+ `__array_interface__` attribute.
+
+ Warning: This attribute exists specifically for `__array_interface__`,
+ and is not a datatype description compatible with `np.dtype`.
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('fields',
+ """
+ Dictionary of named fields defined for this data type, or ``None``.
+
+ The dictionary is indexed by keys that are the names of the fields.
+ Each entry in the dictionary is a tuple fully describing the field::
+
+ (dtype, offset[, title])
+
+ Offset is limited to C int, which is signed and usually 32 bits.
+ If present, the optional title can be any object (if it is a string
+ or unicode then it will also be a key in the fields dictionary,
+ otherwise it's meta-data). Notice also that the first two elements
+ of the tuple can be passed directly as arguments to the ``ndarray.getfield``
+ and ``ndarray.setfield`` methods.
+
+ See Also
+ --------
+ ndarray.getfield, ndarray.setfield
+
+ Examples
+ --------
+ >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
+ >>> print(dt.fields)
+ {'grades': (dtype(('float64',(2,))), 16), 'name': (dtype('|S16'), 0)}
+
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('flags',
+ """
+ Bit-flags describing how this data type is to be interpreted.
+
+ Bit-masks are in `numpy.core.multiarray` as the constants
+ `ITEM_HASOBJECT`, `LIST_PICKLE`, `ITEM_IS_POINTER`, `NEEDS_INIT`,
+ `NEEDS_PYAPI`, `USE_GETITEM`, `USE_SETITEM`. A full explanation
+ of these flags is in C-API documentation; they are largely useful
+ for user-defined data-types.
+
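+ Examples
+ --------
+ A brief illustration; the exact integer is an implementation detail,
+ but plain numeric dtypes set no flags, while object dtypes set several:
+
+ >>> np.dtype(np.float64).flags
+ 0
+ >>> np.dtype(object).flags
+ 63
+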
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('hasobject',
+ """
+ Boolean indicating whether this dtype contains any reference-counted
+ objects in any fields or sub-dtypes.
+
+ Recall that what is actually in the ndarray memory representing
+ the Python object is the memory address of that object (a pointer).
+ Special handling may be required, and this attribute is useful for
+ distinguishing data types that may contain arbitrary Python objects
+ and data-types that won't.
+
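+ Examples
+ --------
+ One plain dtype and one containing a Python-object field, for contrast:
+
+ >>> np.dtype(np.float64).hasobject
+ False
+ >>> np.dtype([('a', object)]).hasobject
+ True
+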
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('isbuiltin',
+ """
+ Integer indicating how this dtype relates to the built-in dtypes.
+
+ Read-only.
+
+ = ========================================================================
+ 0 if this is a structured array type, with fields
+ 1 if this is a dtype compiled into numpy (such as ints, floats etc)
+ 2 if the dtype is for a user-defined numpy type
+ A user-defined type uses the numpy C-API machinery to extend
+ numpy to handle a new array type. See
+ :ref:`user.user-defined-data-types` in the NumPy manual.
+ = ========================================================================
+
+ Examples
+ --------
+ >>> dt = np.dtype('i2')
+ >>> dt.isbuiltin
+ 1
+ >>> dt = np.dtype('f8')
+ >>> dt.isbuiltin
+ 1
+ >>> dt = np.dtype([('field1', 'f8')])
+ >>> dt.isbuiltin
+ 0
+
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('isnative',
+ """
+ Boolean indicating whether the byte order of this dtype is native
+ to the platform.
+
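+ Examples
+ --------
+ A short sketch; comparing against `sys.byteorder` keeps the result
+ platform-independent:
+
+ >>> import sys
+ >>> np.dtype('=i4').isnative
+ True
+ >>> np.dtype('<i4').isnative == (sys.byteorder == 'little')
+ True
+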
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('isalignedstruct',
+ """
+ Boolean indicating whether the dtype is a struct which maintains
+ field alignment. This flag is sticky, so when combining multiple
+ structs together, it is preserved and produces new dtypes which
+ are also aligned.
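+
+ Examples
+ --------
+ The same struct built with and without ``align=True``, for illustration:
+
+ >>> np.dtype([('a', np.int8), ('b', np.int32)], align=True).isalignedstruct
+ True
+ >>> np.dtype([('a', np.int8), ('b', np.int32)]).isalignedstruct
+ False
+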
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('itemsize',
+ """
+ The element size of this data-type object.
+
+ For 18 of the 21 types this number is fixed by the data-type.
+ For the flexible data-types, this number can be anything.
+
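+ Examples
+ --------
+ One fixed-size and one flexible dtype, for illustration:
+
+ >>> np.dtype(np.int32).itemsize
+ 4
+ >>> np.dtype('S5').itemsize
+ 5
+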
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('kind',
+ """
+ A character code (one of 'biufcmMOSUV') identifying the general kind of data.
+
+ = ======================
+ b boolean
+ i signed integer
+ u unsigned integer
+ f floating-point
+ c complex floating-point
+ m timedelta
+ M datetime
+ O object
+ S (byte-)string
+ U Unicode
+ V void
+ = ======================
+
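+ Examples
+ --------
+ A couple of illustrative lookups:
+
+ >>> np.dtype('f8').kind
+ 'f'
+ >>> np.dtype('S5').kind
+ 'S'
+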
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('name',
+ """
+ A bit-width name for this data-type.
+
+ Un-sized flexible data-type objects do not have this attribute.
+
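+ Examples
+ --------
+ For illustration:
+
+ >>> np.dtype(np.float64).name
+ 'float64'
+ >>> np.dtype('<i2').name
+ 'int16'
+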
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('names',
+ """
+ Ordered list of field names, or ``None`` if there are no fields.
+
+ The names are ordered according to increasing byte offset. This can be
+ used, for example, to walk through all of the named fields in offset order.
+
+ Examples
+ --------
+ >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
+ >>> dt.names
+ ('name', 'grades')
+
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('num',
+ """
+ A unique number for each of the 21 different built-in types.
+
+ These are roughly ordered from least-to-most precision.
+
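+ Examples
+ --------
+ Illustrative values (the numbering is fixed by NumPy's C sources):
+
+ >>> np.dtype(np.bool_).num
+ 0
+ >>> np.dtype(np.float64).num
+ 12
+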
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('shape',
+ """
+ Shape tuple of the sub-array if this data type describes a sub-array,
+ and ``()`` otherwise.
+
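+ Examples
+ --------
+ A sub-array dtype and a scalar dtype, for comparison:
+
+ >>> np.dtype(('i4', (2, 3))).shape
+ (2, 3)
+ >>> np.dtype('i4').shape
+ ()
+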
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('ndim',
+ """
+ Number of dimensions of the sub-array if this data type describes a
+ sub-array, and ``0`` otherwise.
+
+ .. versionadded:: 1.13.0
+
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('str',
+ """The array-protocol typestring of this data-type object."""))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('subdtype',
+ """
+ Tuple ``(item_dtype, shape)`` if this `dtype` describes a sub-array, and
+ None otherwise.
+
+ The *shape* is the fixed shape of the sub-array described by this
+ data type, and *item_dtype* the data type of the array.
+
+ If a field whose dtype object has this attribute is retrieved,
+ then the extra dimensions implied by *shape* are tacked on to
+ the end of the retrieved array.
+
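+ Examples
+ --------
+ For illustration:
+
+ >>> np.dtype(('i4', (2, 2))).subdtype
+ (dtype('int32'), (2, 2))
+ >>> np.dtype('i4').subdtype is None
+ True
+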
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('type',
+ """The type object used to instantiate a scalar of this data-type."""))
+
+##############################################################################
+#
+# dtype methods
+#
+##############################################################################
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('newbyteorder',
+ """
+ newbyteorder(new_order='S')
+
+ Return a new dtype with a different byte order.
+
+ Changes are also made in all fields and sub-arrays of the data type.
+
+ Parameters
+ ----------
+ new_order : string, optional
+ Byte order to force; a value from the byte order specifications
+ below. The default value ('S') results in swapping the current
+ byte order. `new_order` codes can be any of:
+
+ * 'S' - swap dtype from current to opposite endian
+ * {'<', 'L'} - little endian
+ * {'>', 'B'} - big endian
+ * {'=', 'N'} - native order
+ * {'|', 'I'} - ignore (no change to byte order)
+
+ The code does a case-insensitive check on the first letter of
+ `new_order` for these alternatives. For example, any of '>'
+ or 'B' or 'b' or 'brian' are valid to specify big-endian.
+
+ Returns
+ -------
+ new_dtype : dtype
+ New dtype object with the given change to the byte order.
+
+ Notes
+ -----
+ Changes are also made in all fields and sub-arrays of the data type.
+
+ Examples
+ --------
+ >>> import sys
+ >>> sys_is_le = sys.byteorder == 'little'
+ >>> native_code = sys_is_le and '<' or '>'
+ >>> swapped_code = sys_is_le and '>' or '<'
+ >>> native_dt = np.dtype(native_code+'i2')
+ >>> swapped_dt = np.dtype(swapped_code+'i2')
+ >>> native_dt.newbyteorder('S') == swapped_dt
+ True
+ >>> native_dt.newbyteorder() == swapped_dt
+ True
+ >>> native_dt == swapped_dt.newbyteorder('S')
+ True
+ >>> native_dt == swapped_dt.newbyteorder('=')
+ True
+ >>> native_dt == swapped_dt.newbyteorder('N')
+ True
+ >>> native_dt == native_dt.newbyteorder('|')
+ True
+ >>> np.dtype('<i2') == native_dt.newbyteorder('<')
+ True
+ >>> np.dtype('<i2') == native_dt.newbyteorder('L')
+ True
+ >>> np.dtype('>i2') == native_dt.newbyteorder('>')
+ True
+ >>> np.dtype('>i2') == native_dt.newbyteorder('B')
+ True
+
+ """))
+
+
+##############################################################################
+#
+# Datetime-related Methods
+#
+##############################################################################
+
+add_newdoc('numpy.core.multiarray', 'busdaycalendar',
+ """
+ busdaycalendar(weekmask='1111100', holidays=None)
+
+ A business day calendar object that efficiently stores information
+ defining valid days for the busday family of functions.
+
+ The default valid days are Monday through Friday ("business days").
+ A busdaycalendar object can be specified with any set of weekly
+    valid days, plus an optional list of "holiday" dates that will always
+    be invalid.
+
+ Once a busdaycalendar object is created, the weekmask and holidays
+ cannot be modified.
+
+ .. versionadded:: 1.7.0
+
+ Parameters
+ ----------
+ weekmask : str or array_like of bool, optional
+ A seven-element array indicating which of Monday through Sunday are
+ valid days. May be specified as a length-seven list or array, like
+ [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
+ like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
+ weekdays, optionally separated by white space. Valid abbreviations
+ are: Mon Tue Wed Thu Fri Sat Sun
+ holidays : array_like of datetime64[D], optional
+ An array of dates to consider as invalid dates, no matter which
+ weekday they fall upon. Holiday dates may be specified in any
+ order, and NaT (not-a-time) dates are ignored. This list is
+ saved in a normalized form that is suited for fast calculations
+ of valid days.
+
+ Returns
+ -------
+ out : busdaycalendar
+ A business day calendar object containing the specified
+ weekmask and holidays values.
+
+ See Also
+ --------
+ is_busday : Returns a boolean array indicating valid days.
+ busday_offset : Applies an offset counted in valid days.
+ busday_count : Counts how many valid days are in a half-open date range.
+
+ Attributes
+ ----------
+ Note: once a busdaycalendar object is created, you cannot modify the
+ weekmask or holidays. The attributes return copies of internal data.
+ weekmask : (copy) seven-element array of bool
+ holidays : (copy) sorted array of datetime64[D]
+
+ Examples
+ --------
+ >>> # Some important days in July
+ ... bdd = np.busdaycalendar(
+ ... holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
+ >>> # Default is Monday to Friday weekdays
+ ... bdd.weekmask
+ array([ True, True, True, True, True, False, False], dtype='bool')
+ >>> # Any holidays already on the weekend are removed
+ ... bdd.holidays
+ array(['2011-07-01', '2011-07-04'], dtype='datetime64[D]')
+ """)
+
+add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('weekmask',
+ """A copy of the seven-element boolean mask indicating valid days."""))
+
+add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('holidays',
+ """A copy of the holiday array indicating additional invalid days."""))
+
+add_newdoc('numpy.core.multiarray', 'normalize_axis_index',
+ """
+ normalize_axis_index(axis, ndim, msg_prefix=None)
+
+    Normalizes an axis index, `axis`, such that it is a valid positive index
+    into the shape of an array with `ndim` dimensions. Raises an AxisError with an
+ appropriate message if this is not possible.
+
+ Used internally by all axis-checking logic.
+
+ .. versionadded:: 1.13.0
+
+ Parameters
+ ----------
+ axis : int
+ The un-normalized index of the axis. Can be negative
+ ndim : int
+ The number of dimensions of the array that `axis` should be normalized
+ against
+ msg_prefix : str
+ A prefix to put before the message, typically the name of the argument
+
+ Returns
+ -------
+ normalized_axis : int
+ The normalized axis index, such that `0 <= normalized_axis < ndim`
+
+ Raises
+ ------
+ AxisError
+        If the axis index is invalid, i.e. when `-ndim <= axis < ndim` is false.
+
+ Examples
+ --------
+ >>> normalize_axis_index(0, ndim=3)
+ 0
+ >>> normalize_axis_index(1, ndim=3)
+ 1
+ >>> normalize_axis_index(-1, ndim=3)
+ 2
+
+ >>> normalize_axis_index(3, ndim=3)
+ Traceback (most recent call last):
+ ...
+ AxisError: axis 3 is out of bounds for array of dimension 3
+ >>> normalize_axis_index(-4, ndim=3, msg_prefix='axes_arg')
+ Traceback (most recent call last):
+ ...
+ AxisError: axes_arg: axis -4 is out of bounds for array of dimension 3
+ """)
+
+add_newdoc('numpy.core.multiarray', 'datetime_data',
+ """
+ datetime_data(dtype, /)
+
+ Get information about the step size of a date or time type.
+
+ The returned tuple can be passed as the second argument of `numpy.datetime64` and
+ `numpy.timedelta64`.
+
+ Parameters
+ ----------
+ dtype : dtype
+ The dtype object, which must be a `datetime64` or `timedelta64` type.
+
+ Returns
+ -------
+ unit : str
+        The :ref:`datetime unit <arrays.dtypes.dateunits>` on which this dtype
+        is based.
+ count : int
+ The number of base units in a step.
+
+ Examples
+ --------
+ >>> dt_25s = np.dtype('timedelta64[25s]')
+ >>> np.datetime_data(dt_25s)
+ ('s', 25)
+ >>> np.array(10, dt_25s).astype('timedelta64[s]')
+ array(250, dtype='timedelta64[s]')
+
+ The result can be used to construct a datetime that uses the same units
+ as a timedelta
+
+ >>> np.datetime64('2010', np.datetime_data(dt_25s))
+ numpy.datetime64('2010-01-01T00:00:00', '25s')
+ """)
+
+
+##############################################################################
+#
+# Documentation for `generic` attributes and methods
+#
+##############################################################################
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ """
+ Base class for numpy scalar types.
+
+ Class from which most (all?) numpy scalar types are derived. For
+ consistency, exposes the same API as `ndarray`, despite many
+ consequent attributes being either "get-only," or completely irrelevant.
+ This is the class from which it is strongly suggested users should derive
+ custom scalar types.
+
+ """)
+
+# Attributes
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('T',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class so as to
+ provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('base',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class so as to
+    provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('data',
+ """Pointer to start of data."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('dtype',
+ """Get array data-descriptor."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('flags',
+ """The integer value of flags."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('flat',
+ """A 1-D view of the scalar."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('imag',
+ """The imaginary part of the scalar."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('itemsize',
+ """The length of one element in bytes."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('nbytes',
+ """The length of the scalar in bytes."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('ndim',
+ """The number of array dimensions."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('real',
+ """The real part of the scalar."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('shape',
+ """Tuple of array dimensions."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('size',
+ """The number of elements in the gentype."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('strides',
+ """Tuple of bytes steps in each dimension."""))
+
+# Methods
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('all',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('any',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('argmax',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('argmin',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('argsort',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('astype',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('byteswap',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class so as to
+ provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('choose',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('clip',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('compress',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('conjugate',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('copy',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('cumprod',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('cumsum',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('diagonal',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('dump',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('dumps',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('fill',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('flatten',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('getfield',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('item',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('itemset',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('max',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('mean',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('min',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('newbyteorder',
+ """
+ newbyteorder(new_order='S')
+
+ Return a new `dtype` with a different byte order.
+
+ Changes are also made in all fields and sub-arrays of the data type.
+
+ The `new_order` code can be any from the following:
+
+ * 'S' - swap dtype from current to opposite endian
+ * {'<', 'L'} - little endian
+ * {'>', 'B'} - big endian
+ * {'=', 'N'} - native order
+ * {'|', 'I'} - ignore (no change to byte order)
+
+ Parameters
+ ----------
+ new_order : str, optional
+ Byte order to force; a value from the byte order specifications
+ above. The default value ('S') results in swapping the current
+ byte order. The code does a case-insensitive check on the first
+ letter of `new_order` for the alternatives above. For example,
+ any of 'B' or 'b' or 'biggish' are valid to specify big-endian.
+
+ Returns
+ -------
+ new_dtype : dtype
+ New `dtype` object with the given change to the byte order.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('nonzero',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('prod',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('ptp',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('put',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('ravel',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('repeat',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('reshape',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('resize',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('round',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('searchsorted',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('setfield',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('setflags',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class so as to
+ provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('sort',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('squeeze',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('std',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('sum',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('swapaxes',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('take',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('tofile',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('tolist',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('tostring',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('trace',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('transpose',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('var',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('view',
+ """
+ Not implemented (virtual attribute)
+
+ Class generic exists solely to derive numpy scalars from, and possesses,
+ albeit unimplemented, all the attributes of the ndarray class
+ so as to provide a uniform API.
+
+ See Also
+ --------
+ The corresponding attribute of the derived class of interest.
+
+ """))
+
+
+##############################################################################
+#
+# Documentation for scalar type abstract base classes in type hierarchy
+#
+##############################################################################
+
+
+add_newdoc('numpy.core.numerictypes', 'number',
+ """
+ Abstract base class of all numeric scalar types.
+
+ """)
+
+add_newdoc('numpy.core.numerictypes', 'integer',
+ """
+ Abstract base class of all integer scalar types.
+
+ """)
+
+add_newdoc('numpy.core.numerictypes', 'signedinteger',
+ """
+ Abstract base class of all signed integer scalar types.
+
+ """)
+
+add_newdoc('numpy.core.numerictypes', 'unsignedinteger',
+ """
+ Abstract base class of all unsigned integer scalar types.
+
+ """)
+
+add_newdoc('numpy.core.numerictypes', 'inexact',
+ """
+ Abstract base class of all numeric scalar types with a (potentially)
+ inexact representation of the values in its range, such as
+ floating-point numbers.
+
+ """)
+
+add_newdoc('numpy.core.numerictypes', 'floating',
+ """
+ Abstract base class of all floating-point scalar types.
+
+ """)
+
+add_newdoc('numpy.core.numerictypes', 'complexfloating',
+ """
+ Abstract base class of all complex number scalar types that are made up of
+ floating-point numbers.
+
+ """)
+
+add_newdoc('numpy.core.numerictypes', 'flexible',
+ """
+ Abstract base class of all scalar types without predefined length.
+ The actual size of these types depends on the specific `np.dtype`
+ instantiation.
+
+ """)
+
+add_newdoc('numpy.core.numerictypes', 'character',
+ """
+ Abstract base class of all character string scalar types.
+
+ """)
+
+
+##############################################################################
+#
+# Documentation for concrete scalar classes
+#
+##############################################################################
+
+def numeric_type_aliases(aliases):
+ def type_aliases_gen():
+ for alias, doc in aliases:
+ try:
+ alias_type = getattr(_numerictypes, alias)
+ except AttributeError:
+ # The set of aliases that actually exist varies between platforms
+ pass
+ else:
+ yield (alias_type, alias, doc)
+ return list(type_aliases_gen())
+
+
+possible_aliases = numeric_type_aliases([
+ ('int8', '8-bit signed integer (-128 to 127)'),
+ ('int16', '16-bit signed integer (-32768 to 32767)'),
+ ('int32', '32-bit signed integer (-2147483648 to 2147483647)'),
+ ('int64', '64-bit signed integer (-9223372036854775808 to 9223372036854775807)'),
+ ('intp', 'Signed integer large enough to fit pointer, compatible with C ``intptr_t``'),
+ ('uint8', '8-bit unsigned integer (0 to 255)'),
+ ('uint16', '16-bit unsigned integer (0 to 65535)'),
+ ('uint32', '32-bit unsigned integer (0 to 4294967295)'),
+ ('uint64', '64-bit unsigned integer (0 to 18446744073709551615)'),
+ ('uintp', 'Unsigned integer large enough to fit pointer, compatible with C ``uintptr_t``'),
+ ('float16', '16-bit-precision floating-point number type: sign bit, 5 bits exponent, 10 bits mantissa'),
+ ('float32', '32-bit-precision floating-point number type: sign bit, 8 bits exponent, 23 bits mantissa'),
+ ('float64', '64-bit precision floating-point number type: sign bit, 11 bits exponent, 52 bits mantissa'),
+ ('float96', '96-bit extended-precision floating-point number type'),
+ ('float128', '128-bit extended-precision floating-point number type'),
+ ('complex64', 'Complex number type composed of 2 32-bit-precision floating-point numbers'),
+ ('complex128', 'Complex number type composed of 2 64-bit-precision floating-point numbers'),
+ ('complex192', 'Complex number type composed of 2 96-bit extended-precision floating-point numbers'),
+ ('complex256', 'Complex number type composed of 2 128-bit extended-precision floating-point numbers'),
+ ])
+
+
+def add_newdoc_for_scalar_type(obj, fixed_aliases, doc):
+ o = getattr(_numerictypes, obj)
+
+ character_code = dtype(o).char
+ canonical_name_doc = "" if obj == o.__name__ else "Canonical name: ``np.{}``.\n ".format(obj)
+ alias_doc = ''.join("Alias: ``np.{}``.\n ".format(alias) for alias in fixed_aliases)
+ alias_doc += ''.join("Alias *on this platform*: ``np.{}``: {}.\n ".format(alias, doc)
+ for (alias_type, alias, doc) in possible_aliases if alias_type is o)
+
+ docstring = """
+ {doc}
+ Character code: ``'{character_code}'``.
+ {canonical_name_doc}{alias_doc}
+ """.format(doc=doc.strip(), character_code=character_code,
+ canonical_name_doc=canonical_name_doc, alias_doc=alias_doc)
+
+ add_newdoc('numpy.core.numerictypes', obj, docstring)
+
+
+add_newdoc_for_scalar_type('bool_', ['bool8'],
+ """
+ Boolean type (True or False), stored as a byte.
+ """)
+
+add_newdoc_for_scalar_type('byte', [],
+ """
+ Signed integer type, compatible with C ``char``.
+ """)
+
+add_newdoc_for_scalar_type('short', [],
+ """
+ Signed integer type, compatible with C ``short``.
+ """)
+
+add_newdoc_for_scalar_type('intc', [],
+ """
+ Signed integer type, compatible with C ``int``.
+ """)
+
+add_newdoc_for_scalar_type('int_', [],
+ """
+    Signed integer type, compatible with Python `int` and C ``long``.
+ """)
+
+add_newdoc_for_scalar_type('longlong', [],
+ """
+ Signed integer type, compatible with C ``long long``.
+ """)
+
+add_newdoc_for_scalar_type('ubyte', [],
+ """
+ Unsigned integer type, compatible with C ``unsigned char``.
+ """)
+
+add_newdoc_for_scalar_type('ushort', [],
+ """
+ Unsigned integer type, compatible with C ``unsigned short``.
+ """)
+
+add_newdoc_for_scalar_type('uintc', [],
+ """
+ Unsigned integer type, compatible with C ``unsigned int``.
+ """)
+
+add_newdoc_for_scalar_type('uint', [],
+ """
+ Unsigned integer type, compatible with C ``unsigned long``.
+ """)
+
+add_newdoc_for_scalar_type('ulonglong', [],
+ """
+    Unsigned integer type, compatible with C ``unsigned long long``.
+ """)
+
+add_newdoc_for_scalar_type('half', [],
+ """
+ Half-precision floating-point number type.
+ """)
+
+add_newdoc_for_scalar_type('single', [],
+ """
+ Single-precision floating-point number type, compatible with C ``float``.
+ """)
+
+add_newdoc_for_scalar_type('double', ['float_'],
+ """
+ Double-precision floating-point number type, compatible with Python `float`
+ and C ``double``.
+ """)
+
+add_newdoc_for_scalar_type('longdouble', ['longfloat'],
+ """
+ Extended-precision floating-point number type, compatible with C
+ ``long double`` but not necessarily with IEEE 754 quadruple-precision.
+ """)
+
+add_newdoc_for_scalar_type('csingle', ['singlecomplex'],
+ """
+ Complex number type composed of two single-precision floating-point
+ numbers.
+ """)
+
+add_newdoc_for_scalar_type('cdouble', ['cfloat', 'complex_'],
+ """
+ Complex number type composed of two double-precision floating-point
+ numbers, compatible with Python `complex`.
+ """)
+
+add_newdoc_for_scalar_type('clongdouble', ['clongfloat', 'longcomplex'],
+ """
+ Complex number type composed of two extended-precision floating-point
+ numbers.
+ """)
+
+add_newdoc_for_scalar_type('object_', [],
+ """
+ Any Python object.
+ """)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_add_newdocs.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/_add_newdocs.pyc
new file mode 100644
index 0000000..e6ee6c7
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/_add_newdocs.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_aliased_types.py b/project/venv/lib/python2.7/site-packages/numpy/core/_aliased_types.py
new file mode 100644
index 0000000..e69de29
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_aliased_types.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/_aliased_types.pyc
new file mode 100644
index 0000000..6bfc0e6
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/_aliased_types.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_dtype.py b/project/venv/lib/python2.7/site-packages/numpy/core/_dtype.py
new file mode 100644
index 0000000..3a12c8f
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/_dtype.py
@@ -0,0 +1,341 @@
+"""
+A place for code to be called from the implementation of np.dtype
+
+String handling is much easier to do correctly in python.
+"""
+from __future__ import division, absolute_import, print_function
+
+import sys
+
+import numpy as np
+
+
+_kind_to_stem = {
+ 'u': 'uint',
+ 'i': 'int',
+ 'c': 'complex',
+ 'f': 'float',
+ 'b': 'bool',
+ 'V': 'void',
+ 'O': 'object',
+ 'M': 'datetime',
+ 'm': 'timedelta'
+}
+if sys.version_info[0] >= 3:
+ _kind_to_stem.update({
+ 'S': 'bytes',
+ 'U': 'str'
+ })
+else:
+ _kind_to_stem.update({
+ 'S': 'string',
+ 'U': 'unicode'
+ })
+
+
+def _kind_name(dtype):
+ try:
+ return _kind_to_stem[dtype.kind]
+ except KeyError:
+ raise RuntimeError(
+ "internal dtype error, unknown kind {!r}"
+ .format(dtype.kind)
+ )
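+
+# For example: _kind_name(np.dtype('f8')) gives 'float' and
+# _kind_name(np.dtype('m8[s]')) gives 'timedelta'.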
+
+
+def __str__(dtype):
+ if dtype.fields is not None:
+ return _struct_str(dtype, include_align=True)
+ elif dtype.subdtype:
+ return _subarray_str(dtype)
+ elif issubclass(dtype.type, np.flexible) or not dtype.isnative:
+ return dtype.str
+ else:
+ return dtype.name
+
+
+def __repr__(dtype):
+ arg_str = _construction_repr(dtype, include_align=False)
+ if dtype.isalignedstruct:
+ arg_str = arg_str + ", align=True"
+ return "dtype({})".format(arg_str)
+
+
+def _unpack_field(dtype, offset, title=None):
+ """
+ Helper function to normalize the items in dtype.fields.
+
+ Call as:
+
+ dtype, offset, title = _unpack_field(*dtype.fields[name])
+ """
+ return dtype, offset, title
+
+
+def _isunsized(dtype):
+ # PyDataType_ISUNSIZED
+ return dtype.itemsize == 0
+
+
+def _construction_repr(dtype, include_align=False, short=False):
+ """
+ Creates a string repr of the dtype, excluding the 'dtype()' part
+ surrounding the object. This object may be a string, a list, or
+ a dict depending on the nature of the dtype. This
+ is the object passed as the first parameter to the dtype
+ constructor, and if no additional constructor parameters are
+ given, will reproduce the exact memory layout.
+
+ Parameters
+ ----------
+ short : bool
+ If true, this creates a shorter repr using 'kind' and 'itemsize', instead
+ of the longer type name.
+
+ include_align : bool
+ If true, this includes the 'align=True' parameter
+ inside the struct dtype construction dict when needed. Use this flag
+ if you want a proper repr string without the 'dtype()' part around it.
+
+ If false, this does not preserve the
+ 'align=True' parameter or sticky NPY_ALIGNED_STRUCT flag for
+ struct arrays like the regular repr does, because the 'align'
+ flag is not part of first dtype constructor parameter. This
+ mode is intended for a full 'repr', where the 'align=True' is
+ provided as the second parameter.
+ """
+ if dtype.fields is not None:
+ return _struct_str(dtype, include_align=include_align)
+ elif dtype.subdtype:
+ return _subarray_str(dtype)
+ else:
+ return _scalar_str(dtype, short=short)
+
+
+def _scalar_str(dtype, short):
+ byteorder = _byte_order_str(dtype)
+
+ if dtype.type == np.bool_:
+ if short:
+ return "'?'"
+ else:
+ return "'bool'"
+
+ elif dtype.type == np.object_:
+ # The object reference may be different sizes on different
+ # platforms, so it should never include the itemsize here.
+ return "'O'"
+
+ elif dtype.type == np.string_:
+ if _isunsized(dtype):
+ return "'S'"
+ else:
+ return "'S%d'" % dtype.itemsize
+
+ elif dtype.type == np.unicode_:
+ if _isunsized(dtype):
+ return "'%sU'" % byteorder
+ else:
+ return "'%sU%d'" % (byteorder, dtype.itemsize / 4)
+
+ # unlike the other types, subclasses of void are preserved - but
+ # historically the repr does not actually reveal the subclass
+ elif issubclass(dtype.type, np.void):
+ if _isunsized(dtype):
+ return "'V'"
+ else:
+ return "'V%d'" % dtype.itemsize
+
+ elif dtype.type == np.datetime64:
+ return "'%sM8%s'" % (byteorder, _datetime_metadata_str(dtype))
+
+ elif dtype.type == np.timedelta64:
+ return "'%sm8%s'" % (byteorder, _datetime_metadata_str(dtype))
+
+    elif np.issubdtype(dtype, np.number):
+        # Short repr with endianness, like '<f8'
+        if short or dtype.byteorder not in ('=', '|'):
+            return "'%s%c%d'" % (byteorder, dtype.kind, dtype.itemsize)
+
+        # Longer repr, like 'float64'
+        else:
+            return "'%s%d'" % (_kind_name(dtype), 8*dtype.itemsize)
+
+    elif dtype.isbuiltin == 2:
+        return dtype.type.__name__
+
+    else:
+        raise RuntimeError(
+            "Internal error: NumPy dtype unrecognized type number")
+
+
+def _byte_order_str(dtype):
+    """ Convert dtype.byteorder into a string like '<' or '>' """
+ # hack to obtain the native and swapped byte order characters
+    swapped = np.dtype(int).newbyteorder('S')
+    native = swapped.newbyteorder('S')
+
+ byteorder = dtype.byteorder
+ if byteorder == '=':
+ return native.byteorder
+    if byteorder == 'S':
+ # TODO: this path can never be reached
+ return swapped.byteorder
+ elif byteorder == '|':
+ return ''
+ else:
+ return byteorder
+
+
+def _datetime_metadata_str(dtype):
+ # TODO: this duplicates the C append_metastr_to_string
+ unit, count = np.datetime_data(dtype)
+ if unit == 'generic':
+ return ''
+ elif count == 1:
+ return '[{}]'.format(unit)
+ else:
+ return '[{}{}]'.format(count, unit)
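+
+# For example: np.dtype('M8') (generic units) gives '', np.dtype('m8[s]')
+# gives '[s]' and np.dtype('m8[25s]') gives '[25s]'.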
+
+
+def _struct_dict_str(dtype, includealignedflag):
+ # unpack the fields dictionary into ls
+ names = dtype.names
+ fld_dtypes = []
+ offsets = []
+ titles = []
+ for name in names:
+ fld_dtype, offset, title = _unpack_field(*dtype.fields[name])
+ fld_dtypes.append(fld_dtype)
+ offsets.append(offset)
+ titles.append(title)
+
+ # Build up a string to make the dictionary
+
+ # First, the names
+ ret = "{'names':["
+ ret += ",".join(repr(name) for name in names)
+
+ # Second, the formats
+ ret += "], 'formats':["
+ ret += ",".join(
+ _construction_repr(fld_dtype, short=True) for fld_dtype in fld_dtypes)
+
+ # Third, the offsets
+ ret += "], 'offsets':["
+ ret += ",".join("%d" % offset for offset in offsets)
+
+ # Fourth, the titles
+ if any(title is not None for title in titles):
+ ret += "], 'titles':["
+ ret += ",".join(repr(title) for title in titles)
+
+ # Fifth, the itemsize
+ ret += "], 'itemsize':%d" % dtype.itemsize
+
+ if (includealignedflag and dtype.isalignedstruct):
+ # Finally, the aligned flag
+ ret += ", 'aligned':True}"
+ else:
+ ret += "}"
+
+ return ret
+
+
+def _is_packed(dtype):
+ """
+ Checks whether the structured data type in 'dtype'
+ has a simple layout, where all the fields are in order,
+ and follow each other with no alignment padding.
+
+ When this returns true, the dtype can be reconstructed
+ from a list of the field names and dtypes with no additional
+ dtype parameters.
+
+    Duplicates the C `is_dtype_struct_simple_unaligned_layout` function.
+ """
+ total_offset = 0
+ for name in dtype.names:
+ fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name])
+ if fld_offset != total_offset:
+ return False
+ total_offset += fld_dtype.itemsize
+ if total_offset != dtype.itemsize:
+ return False
+ return True
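+
+# For example: np.dtype([('a', 'i1'), ('b', 'i8')]) is packed (offsets 0 and
+# 1, itemsize 9), while the same spec with align=True is not, because
+# alignment padding is inserted before 'b'.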
+
+
+def _struct_list_str(dtype):
+ items = []
+ for name in dtype.names:
+ fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name])
+
+ item = "("
+ if title is not None:
+ item += "({!r}, {!r}), ".format(title, name)
+ else:
+ item += "{!r}, ".format(name)
+ # Special case subarray handling here
+ if fld_dtype.subdtype is not None:
+ base, shape = fld_dtype.subdtype
+ item += "{}, {}".format(
+ _construction_repr(base, short=True),
+ shape
+ )
+ else:
+ item += _construction_repr(fld_dtype, short=True)
+
+ item += ")"
+ items.append(item)
+
+ return "[" + ", ".join(items) + "]"
+
+
+def _struct_str(dtype, include_align):
+ # The list str representation can't include the 'align=' flag,
+ # so if it is requested and the struct has the aligned flag set,
+ # we must use the dict str instead.
+ if not (include_align and dtype.isalignedstruct) and _is_packed(dtype):
+ sub = _struct_list_str(dtype)
+
+ else:
+ sub = _struct_dict_str(dtype, include_align)
+
+ # If the data type isn't the default, void, show it
+ if dtype.type != np.void:
+ return "({t.__module__}.{t.__name__}, {f})".format(t=dtype.type, f=sub)
+ else:
+ return sub
+
+
+def _subarray_str(dtype):
+ base, shape = dtype.subdtype
+ return "({}, {})".format(
+ _construction_repr(base, short=True),
+ shape
+ )
+
+
+def _name_get(dtype):
+ # provides dtype.name.__get__
+
+ if dtype.isbuiltin == 2:
+ # user dtypes don't promise to do anything special
+ return dtype.type.__name__
+
+ # Builtin classes are documented as returning a "bit name"
+ name = dtype.type.__name__
+
+ # handle bool_, str_, etc
+ if name[-1] == '_':
+ name = name[:-1]
+
+ # append bit counts to str, unicode, and void
+ if np.issubdtype(dtype, np.flexible) and not _isunsized(dtype):
+ name += "{}".format(dtype.itemsize * 8)
+
+ # append metadata to datetimes
+ elif dtype.type in (np.datetime64, np.timedelta64):
+ name += _datetime_metadata_str(dtype)
+
+ return name
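+
+# For example: np.dtype(np.bool_).name gives 'bool', np.dtype('S5').name
+# gives 'string40' under Python 2 (the bit count is appended for flexible
+# types), and np.dtype('m8[25s]').name gives 'timedelta64[25s]'.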
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_dtype.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/_dtype.pyc
new file mode 100644
index 0000000..fd10efd
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/_dtype.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_dtype_ctypes.py b/project/venv/lib/python2.7/site-packages/numpy/core/_dtype_ctypes.py
new file mode 100644
index 0000000..0852b1e
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/_dtype_ctypes.py
@@ -0,0 +1,113 @@
+"""
+Conversion from ctypes to dtype.
+
+In an ideal world, we could achieve this through the PEP3118 buffer protocol,
+something like::
+
+ def dtype_from_ctypes_type(t):
+ # needed to ensure that the shape of `t` is within memoryview.format
+ class DummyStruct(ctypes.Structure):
+ _fields_ = [('a', t)]
+
+ # empty to avoid memory allocation
+ ctype_0 = (DummyStruct * 0)()
+ mv = memoryview(ctype_0)
+
+ # convert the struct, and slice back out the field
+ return _dtype_from_pep3118(mv.format)['a']
+
+Unfortunately, this fails because:
+
+* ctypes cannot handle length-0 arrays with PEP3118 (bpo-32782)
+* PEP3118 cannot represent unions, but both numpy and ctypes can
+* ctypes cannot handle big-endian structs with PEP3118 (bpo-32780)
+"""
+import _ctypes
+import ctypes
+
+import numpy as np
+
+
+def _from_ctypes_array(t):
+ return np.dtype((dtype_from_ctypes_type(t._type_), (t._length_,)))
+
+
+def _from_ctypes_structure(t):
+ for item in t._fields_:
+ if len(item) > 2:
+ raise TypeError(
+ "ctypes bitfields have no dtype equivalent")
+
+ if hasattr(t, "_pack_"):
+ formats = []
+ offsets = []
+ names = []
+ current_offset = 0
+ for fname, ftyp in t._fields_:
+ names.append(fname)
+ formats.append(dtype_from_ctypes_type(ftyp))
+ # Each type has a default offset, this is platform dependent for some types.
+ effective_pack = min(t._pack_, ctypes.alignment(ftyp))
+ current_offset = ((current_offset + effective_pack - 1) // effective_pack) * effective_pack
+ offsets.append(current_offset)
+ current_offset += ctypes.sizeof(ftyp)
+
+ return np.dtype(dict(
+ formats=formats,
+ offsets=offsets,
+ names=names,
+ itemsize=ctypes.sizeof(t)))
+ else:
+ fields = []
+ for fname, ftyp in t._fields_:
+ fields.append((fname, dtype_from_ctypes_type(ftyp)))
+
+ # by default, ctypes structs are aligned
+ return np.dtype(fields, align=True)
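+
+# For example, a packed struct maps to explicit offsets:
+#
+#   class Packed(ctypes.Structure):
+#       _pack_ = 1
+#       _fields_ = [('a', ctypes.c_uint8), ('b', ctypes.c_uint32)]
+#
+# dtype_from_ctypes_type(Packed) then has offsets [0, 1] and itemsize 5;
+# without _pack_, 'b' would be aligned to offset 4 and the itemsize would
+# be 8.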
+
+
+def _from_ctypes_scalar(t):
+ """
+ Return the dtype type with endianness included if it's the case
+ """
+ if getattr(t, '__ctype_be__', None) is t:
+ return np.dtype('>' + t._type_)
+ elif getattr(t, '__ctype_le__', None) is t:
+ return np.dtype('<' + t._type_)
+ else:
+ return np.dtype(t._type_)
+
+
+def _from_ctypes_union(t):
+ formats = []
+ offsets = []
+ names = []
+ for fname, ftyp in t._fields_:
+ names.append(fname)
+ formats.append(dtype_from_ctypes_type(ftyp))
+ offsets.append(0) # Union fields are offset to 0
+
+ return np.dtype(dict(
+ formats=formats,
+ offsets=offsets,
+ names=names,
+ itemsize=ctypes.sizeof(t)))
+
+
+def dtype_from_ctypes_type(t):
+ """
+ Construct a dtype object from a ctypes type
+ """
+ if issubclass(t, _ctypes.Array):
+ return _from_ctypes_array(t)
+ elif issubclass(t, _ctypes._Pointer):
+ raise TypeError("ctypes pointers have no dtype equivalent")
+ elif issubclass(t, _ctypes.Structure):
+ return _from_ctypes_structure(t)
+ elif issubclass(t, _ctypes.Union):
+ return _from_ctypes_union(t)
+ elif isinstance(getattr(t, '_type_', None), str):
+ return _from_ctypes_scalar(t)
+ else:
+ raise NotImplementedError(
+ "Unknown ctypes type {}".format(t.__name__))
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_dtype_ctypes.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/_dtype_ctypes.pyc
new file mode 100644
index 0000000..5e64300
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/_dtype_ctypes.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_dummy.so b/project/venv/lib/python2.7/site-packages/numpy/core/_dummy.so
new file mode 100755
index 0000000..093ed33
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/_dummy.so differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_internal.py b/project/venv/lib/python2.7/site-packages/numpy/core/_internal.py
new file mode 100644
index 0000000..1d3bb55
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/_internal.py
@@ -0,0 +1,928 @@
+"""
+A place for internal code
+
+Some things are more easily handled in Python.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import re
+import sys
+
+from numpy.compat import unicode
+from numpy.core.overrides import set_module
+from .multiarray import dtype, array, ndarray
+try:
+ import ctypes
+except ImportError:
+ ctypes = None
+
+if (sys.byteorder == 'little'):
+ _nbo = b'<'
+else:
+ _nbo = b'>'
+
+def _makenames_list(adict, align):
+ allfields = []
+ fnames = list(adict.keys())
+ for fname in fnames:
+ obj = adict[fname]
+ n = len(obj)
+ if not isinstance(obj, tuple) or n not in [2, 3]:
+ raise ValueError("entry not a 2- or 3- tuple")
+ if (n > 2) and (obj[2] == fname):
+ continue
+ num = int(obj[1])
+ if (num < 0):
+ raise ValueError("invalid offset.")
+ format = dtype(obj[0], align=align)
+ if (n > 2):
+ title = obj[2]
+ else:
+ title = None
+ allfields.append((fname, format, num, title))
+ # sort by offsets
+ allfields.sort(key=lambda x: x[2])
+ names = [x[0] for x in allfields]
+ formats = [x[1] for x in allfields]
+ offsets = [x[2] for x in allfields]
+ titles = [x[3] for x in allfields]
+
+ return names, formats, offsets, titles
+
+# Called in PyArray_DescrConverter function when
+# a dictionary without "names" and "formats"
+# fields is used as a data-type descriptor.
+def _usefields(adict, align):
+ try:
+ names = adict[-1]
+ except KeyError:
+ names = None
+ if names is None:
+ names, formats, offsets, titles = _makenames_list(adict, align)
+ else:
+ formats = []
+ offsets = []
+ titles = []
+ for name in names:
+ res = adict[name]
+ formats.append(res[0])
+ offsets.append(res[1])
+ if (len(res) > 2):
+ titles.append(res[2])
+ else:
+ titles.append(None)
+
+ return dtype({"names": names,
+ "formats": formats,
+ "offsets": offsets,
+ "titles": titles}, align)
+
+
+# construct an array_protocol descriptor list
+# from the fields attribute of a descriptor
+# This calls itself recursively but should eventually hit
+# a descriptor that has no fields and then return
+# a simple typestring
+
+def _array_descr(descriptor):
+ fields = descriptor.fields
+ if fields is None:
+ subdtype = descriptor.subdtype
+ if subdtype is None:
+ if descriptor.metadata is None:
+ return descriptor.str
+ else:
+ new = descriptor.metadata.copy()
+ if new:
+ return (descriptor.str, new)
+ else:
+ return descriptor.str
+ else:
+ return (_array_descr(subdtype[0]), subdtype[1])
+
+ names = descriptor.names
+ ordered_fields = [fields[x] + (x,) for x in names]
+ result = []
+ offset = 0
+ for field in ordered_fields:
+ if field[1] > offset:
+ num = field[1] - offset
+ result.append(('', '|V%d' % num))
+ offset += num
+ elif field[1] < offset:
+ raise ValueError(
+ "dtype.descr is not defined for types with overlapping or "
+ "out-of-order fields")
+ if len(field) > 3:
+ name = (field[2], field[3])
+ else:
+ name = field[2]
+ if field[0].subdtype:
+ tup = (name, _array_descr(field[0].subdtype[0]),
+ field[0].subdtype[1])
+ else:
+ tup = (name, _array_descr(field[0]))
+ offset += field[0].itemsize
+ result.append(tup)
+
+ if descriptor.itemsize > offset:
+ num = descriptor.itemsize - offset
+ result.append(('', '|V%d' % num))
+
+ return result
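+
+# For example: _array_descr(np.dtype([('a', '<i4'), ('b', '<f8')])) returns
+# [('a', '<i4'), ('b', '<f8')], the value exposed as dtype.descr; gaps
+# between fields show up as unnamed void entries like ('', '|V4').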
+
+# Build a new array from the information in a pickle.
+# Note that the name numpy.core._internal._reconstruct is embedded in
+# pickles of ndarrays made with NumPy before release 1.0
+# so don't remove the name here, or you'll
+# break backward compatibility.
+def _reconstruct(subtype, shape, dtype):
+ return ndarray.__new__(subtype, shape, dtype)
+
+
+# format_re was originally from numarray by J. Todd Miller
+
+format_re = re.compile(br'(?P[<>|=]?)'
+ br'(?P *[(]?[ ,0-9L]*[)]? *)'
+ br'(?P[<>|=]?)'
+ br'(?P[A-Za-z0-9.?]*(?:\[[a-zA-Z0-9,.]+\])?)')
+sep_re = re.compile(br'\s*,\s*')
+space_re = re.compile(br'\s+$')
+
+# astr is a string (perhaps comma separated)
+
+_convorder = {b'=': _nbo}
+
+def _commastring(astr):
+ startindex = 0
+ result = []
+ while startindex < len(astr):
+ mo = format_re.match(astr, pos=startindex)
+ try:
+ (order1, repeats, order2, dtype) = mo.groups()
+ except (TypeError, AttributeError):
+ raise ValueError('format number %d of "%s" is not recognized' %
+ (len(result)+1, astr))
+ startindex = mo.end()
+ # Separator or ending padding
+ if startindex < len(astr):
+ if space_re.match(astr, pos=startindex):
+ startindex = len(astr)
+ else:
+ mo = sep_re.match(astr, pos=startindex)
+ if not mo:
+ raise ValueError(
+ 'format number %d of "%s" is not recognized' %
+ (len(result)+1, astr))
+ startindex = mo.end()
+
+ if order2 == b'':
+ order = order1
+ elif order1 == b'':
+ order = order2
+ else:
+ order1 = _convorder.get(order1, order1)
+ order2 = _convorder.get(order2, order2)
+ if (order1 != order2):
+ raise ValueError(
+ 'inconsistent byte-order specification %s and %s' %
+ (order1, order2))
+ order = order1
+
+ if order in [b'|', b'=', _nbo]:
+ order = b''
+ dtype = order + dtype
+ if (repeats == b''):
+ newitem = dtype
+ else:
+ newitem = (dtype, eval(repeats))
+ result.append(newitem)
+
+ return result
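+
+# For example, on a little-endian build _commastring(b'>i4, (2,3)f8') returns
+# [b'>i4', (b'f8', (2, 3))]: explicit non-native byte orders are kept, while
+# native ('='), ignored ('|') and matching-native codes are dropped.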
+
+class dummy_ctype(object):
+ def __init__(self, cls):
+ self._cls = cls
+ def __mul__(self, other):
+ return self
+ def __call__(self, *other):
+ return self._cls(other)
+ def __eq__(self, other):
+ return self._cls == other._cls
+ def __ne__(self, other):
+ return self._cls != other._cls
+
+def _getintp_ctype():
+ val = _getintp_ctype.cache
+ if val is not None:
+ return val
+ if ctypes is None:
+ import numpy as np
+ val = dummy_ctype(np.intp)
+ else:
+ char = dtype('p').char
+ if (char == 'i'):
+ val = ctypes.c_int
+ elif char == 'l':
+ val = ctypes.c_long
+ elif char == 'q':
+ val = ctypes.c_longlong
+ else:
+ val = ctypes.c_long
+ _getintp_ctype.cache = val
+ return val
+_getintp_ctype.cache = None
+
+# Used for .ctypes attribute of ndarray
+
+class _missing_ctypes(object):
+ def cast(self, num, obj):
+ return num.value
+
+ class c_void_p(object):
+ def __init__(self, ptr):
+ self.value = ptr
+
+
+class _unsafe_first_element_pointer(object):
+ """
+ Helper to allow viewing an array as a ctypes pointer to the first element
+
+ This avoids:
+ * dealing with strides
+ * `.view` rejecting object-containing arrays
+ * `memoryview` not supporting overlapping fields
+ """
+ def __init__(self, arr):
+ self.base = arr
+
+ @property
+ def __array_interface__(self):
+ i = dict(
+ shape=(),
+ typestr='|V0',
+ data=(self.base.__array_interface__['data'][0], False),
+ strides=(),
+ version=3,
+ )
+ return i
+
+
+def _get_void_ptr(arr):
+ """
+ Get a `ctypes.c_void_p` to arr.data, that keeps a reference to the array
+ """
+ import numpy as np
+    # convert to a 0d array that has a data pointer referring to the start
+ # of arr. This holds a reference to arr.
+ simple_arr = np.asarray(_unsafe_first_element_pointer(arr))
+
+ # create a `char[0]` using the same memory.
+ c_arr = (ctypes.c_char * 0).from_buffer(simple_arr)
+
+ # finally cast to void*
+ return ctypes.cast(ctypes.pointer(c_arr), ctypes.c_void_p)
+
+
+class _ctypes(object):
+ def __init__(self, array, ptr=None):
+ self._arr = array
+
+ if ctypes:
+ self._ctypes = ctypes
+ # get a void pointer to the buffer, which keeps the array alive
+ self._data = _get_void_ptr(array)
+ assert self._data.value == ptr
+ else:
+ # fake a pointer-like object that holds onto the reference
+ self._ctypes = _missing_ctypes()
+ self._data = self._ctypes.c_void_p(ptr)
+ self._data._objects = array
+
+ if self._arr.ndim == 0:
+ self._zerod = True
+ else:
+ self._zerod = False
+
+ def data_as(self, obj):
+ """
+ Return the data pointer cast to a particular c-types object.
+ For example, calling ``self._as_parameter_`` is equivalent to
+ ``self.data_as(ctypes.c_void_p)``. Perhaps you want to use the data as a
+ pointer to a ctypes array of floating-point data:
+ ``self.data_as(ctypes.POINTER(ctypes.c_double))``.
+
+ The returned pointer will keep a reference to the array.
+ """
+ return self._ctypes.cast(self._data, obj)
+
+ def shape_as(self, obj):
+ """
+ Return the shape tuple as an array of some other c-types
+ type. For example: ``self.shape_as(ctypes.c_short)``.
+ """
+ if self._zerod:
+ return None
+ return (obj*self._arr.ndim)(*self._arr.shape)
+
+ def strides_as(self, obj):
+ """
+ Return the strides tuple as an array of some other
+ c-types type. For example: ``self.strides_as(ctypes.c_longlong)``.
+ """
+ if self._zerod:
+ return None
+ return (obj*self._arr.ndim)(*self._arr.strides)
+
+ @property
+ def data(self):
+ """
+ A pointer to the memory area of the array as a Python integer.
+ This memory area may contain data that is not aligned, or not in correct
+ byte-order. The memory area may not even be writeable. The array
+ flags and data-type of this array should be respected when passing this
+ attribute to arbitrary C-code to avoid trouble that can include Python
+ crashing. User Beware! The value of this attribute is exactly the same
+ as ``self.__array_interface__['data'][0]``.
+
+ Note that unlike `data_as`, a reference will not be kept to the array:
+ code like ``ctypes.c_void_p((a + b).ctypes.data)`` will result in a
+ pointer to a deallocated array, and should be spelt
+ ``(a + b).ctypes.data_as(ctypes.c_void_p)``
+ """
+ return self._data.value
+
+ @property
+ def shape(self):
+ """
+ (c_intp*self.ndim): A ctypes array of length self.ndim where
+ the basetype is the C-integer corresponding to ``dtype('p')`` on this
+ platform. This base-type could be `ctypes.c_int`, `ctypes.c_long`, or
+ `ctypes.c_longlong` depending on the platform.
+ The c_intp type is defined accordingly in `numpy.ctypeslib`.
+ The ctypes array contains the shape of the underlying array.
+ """
+ return self.shape_as(_getintp_ctype())
+
+ @property
+ def strides(self):
+ """
+ (c_intp*self.ndim): A ctypes array of length self.ndim where
+ the basetype is the same as for the shape attribute. This ctypes array
+ contains the strides information from the underlying array. The strides
+ record how many bytes must be skipped to reach the next element along
+ each axis of the array.
+ """
+ return self.strides_as(_getintp_ctype())
+
+ @property
+ def _as_parameter_(self):
+ """
+ Overrides the ctypes semi-magic method
+
+ Enables `c_func(some_array.ctypes)`
+ """
+ return self._data
+
+ # kept for compatibility
+ get_data = data.fget
+ get_shape = shape.fget
+ get_strides = strides.fget
+ get_as_parameter = _as_parameter_.fget
+
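+# A minimal usage sketch for `data_as` (hypothetical array `a`; kept as a
+# comment so that nothing runs on import):
+#
+#     import ctypes
+#     import numpy as np
+#     a = np.arange(4, dtype=np.float64)
+#     p = a.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
+#     p[0]    # reads the first element through the pointer
+#     # `p` keeps a reference to `a`, so the buffer stays alive.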
+
+def _newnames(datatype, order):
+ """
+ Given a datatype and an order object, return a new names tuple, with the
+ order indicated
+ """
+ oldnames = datatype.names
+ nameslist = list(oldnames)
+ if isinstance(order, (str, unicode)):
+ order = [order]
+ seen = set()
+ if isinstance(order, (list, tuple)):
+ for name in order:
+ try:
+ nameslist.remove(name)
+ except ValueError:
+ if name in seen:
+ raise ValueError("duplicate field name: %s" % (name,))
+ else:
+ raise ValueError("unknown field name: %s" % (name,))
+ seen.add(name)
+ return tuple(list(order) + nameslist)
+ raise ValueError("unsupported order value: %s" % (order,))
+
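+# Sketch of the intended behaviour: for a hypothetical dtype with fields
+# ('a', 'b', 'c'), _newnames(dt, ['c']) returns ('c', 'a', 'b') -- the
+# requested names first, then the remaining names in their original order.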
+def _copy_fields(ary):
+ """Return copy of structured array with padding between fields removed.
+
+ Parameters
+ ----------
+ ary : ndarray
+ Structured array from which to remove padding bytes
+
+ Returns
+ -------
+ ary_copy : ndarray
+ Copy of ary with padding bytes removed
+ """
+ dt = ary.dtype
+ copy_dtype = {'names': dt.names,
+ 'formats': [dt.fields[name][0] for name in dt.names]}
+ return array(ary, dtype=copy_dtype, copy=True)
+
+def _getfield_is_safe(oldtype, newtype, offset):
+ """ Checks safety of getfield for object arrays.
+
+ As in _view_is_safe, we need to check that memory containing objects is not
+ reinterpreted as a non-object datatype and vice versa.
+
+ Parameters
+ ----------
+ oldtype : data-type
+ Data type of the original ndarray.
+ newtype : data-type
+ Data type of the field being accessed by ndarray.getfield
+ offset : int
+ Offset of the field being accessed by ndarray.getfield
+
+ Raises
+ ------
+ TypeError
+ If the field access is invalid
+
+ """
+ if newtype.hasobject or oldtype.hasobject:
+ if offset == 0 and newtype == oldtype:
+ return
+ if oldtype.names:
+ for name in oldtype.names:
+ if (oldtype.fields[name][1] == offset and
+ oldtype.fields[name][0] == newtype):
+ return
+ raise TypeError("Cannot get/set field of an object array")
+ return
+
+def _view_is_safe(oldtype, newtype):
+ """ Checks safety of a view involving object arrays, for example when
+ doing::
+
+ np.zeros(10, dtype=oldtype).view(newtype)
+
+ Parameters
+ ----------
+ oldtype : data-type
+ Data type of original ndarray
+ newtype : data-type
+ Data type of the view
+
+ Raises
+ ------
+ TypeError
+ If the new type is incompatible with the old type.
+
+ """
+
+ # if the types are equivalent, there is no problem.
+ # for example: dtype((np.record, 'i4,i4')) == dtype((np.void, 'i4,i4'))
+ if oldtype == newtype:
+ return
+
+ if newtype.hasobject or oldtype.hasobject:
+ raise TypeError("Cannot change data-type for object array.")
+ return
+
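+# Sketch of the failure mode this guards against (hypothetical array):
+#
+#     import numpy as np
+#     a = np.empty(3, dtype=object)
+#     a.view(np.int64)    # raises TypeError, since object pointers must
+#                         # not be reinterpreted as plain integers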
+# Given a string containing a PEP 3118 format specifier,
+# construct a NumPy dtype
+
+_pep3118_native_map = {
+ '?': '?',
+ 'c': 'S1',
+ 'b': 'b',
+ 'B': 'B',
+ 'h': 'h',
+ 'H': 'H',
+ 'i': 'i',
+ 'I': 'I',
+ 'l': 'l',
+ 'L': 'L',
+ 'q': 'q',
+ 'Q': 'Q',
+ 'e': 'e',
+ 'f': 'f',
+ 'd': 'd',
+ 'g': 'g',
+ 'Zf': 'F',
+ 'Zd': 'D',
+ 'Zg': 'G',
+ 's': 'S',
+ 'w': 'U',
+ 'O': 'O',
+ 'x': 'V', # padding
+}
+_pep3118_native_typechars = ''.join(_pep3118_native_map.keys())
+
+_pep3118_standard_map = {
+ '?': '?',
+ 'c': 'S1',
+ 'b': 'b',
+ 'B': 'B',
+ 'h': 'i2',
+ 'H': 'u2',
+ 'i': 'i4',
+ 'I': 'u4',
+ 'l': 'i4',
+ 'L': 'u4',
+ 'q': 'i8',
+ 'Q': 'u8',
+ 'e': 'f2',
+ 'f': 'f',
+ 'd': 'd',
+ 'Zf': 'F',
+ 'Zd': 'D',
+ 's': 'S',
+ 'w': 'U',
+ 'O': 'O',
+ 'x': 'V', # padding
+}
+_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys())
+
+_pep3118_unsupported_map = {
+ 'u': 'UCS-2 strings',
+ '&': 'pointers',
+ 't': 'bitfields',
+ 'X': 'function pointers',
+}
+
+class _Stream(object):
+ def __init__(self, s):
+ self.s = s
+ self.byteorder = '@'
+
+ def advance(self, n):
+ res = self.s[:n]
+ self.s = self.s[n:]
+ return res
+
+ def consume(self, c):
+ if self.s[:len(c)] == c:
+ self.advance(len(c))
+ return True
+ return False
+
+ def consume_until(self, c):
+ if callable(c):
+ i = 0
+ while i < len(self.s) and not c(self.s[i]):
+ i = i + 1
+ return self.advance(i)
+ else:
+ i = self.s.index(c)
+ res = self.advance(i)
+ self.advance(len(c))
+ return res
+
+ @property
+ def next(self):
+ return self.s[0]
+
+ def __bool__(self):
+ return bool(self.s)
+ __nonzero__ = __bool__
+
+
+def _dtype_from_pep3118(spec):
+ stream = _Stream(spec)
+ dtype, align = __dtype_from_pep3118(stream, is_subdtype=False)
+ return dtype
+
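+# Sketch for a standard-size little-endian spec (field names follow the
+# f%d convention from _fix_names below when omitted):
+#
+#     _dtype_from_pep3118('<l:x:d:y:')
+#     # roughly -> dtype([('x', '<i4'), ('y', '<f8')])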
+def __dtype_from_pep3118(stream, is_subdtype):
+ field_spec = dict(
+ names=[],
+ formats=[],
+ offsets=[],
+ itemsize=0
+ )
+ offset = 0
+ common_alignment = 1
+ is_padding = False
+
+ # Parse spec
+ while stream:
+ value = None
+
+ # End of structure, bail out to upper level
+ if stream.consume('}'):
+ break
+
+ # Sub-arrays (1)
+ shape = None
+ if stream.consume('('):
+ shape = stream.consume_until(')')
+ shape = tuple(map(int, shape.split(',')))
+
+ # Byte order
+ if stream.next in ('@', '=', '<', '>', '^', '!'):
+ byteorder = stream.advance(1)
+ if byteorder == '!':
+ byteorder = '>'
+ stream.byteorder = byteorder
+
+ # Byte order characters also control native vs. standard type sizes
+ if stream.byteorder in ('@', '^'):
+ type_map = _pep3118_native_map
+ type_map_chars = _pep3118_native_typechars
+ else:
+ type_map = _pep3118_standard_map
+ type_map_chars = _pep3118_standard_typechars
+
+ # Item sizes
+ itemsize_str = stream.consume_until(lambda c: not c.isdigit())
+ if itemsize_str:
+ itemsize = int(itemsize_str)
+ else:
+ itemsize = 1
+
+ # Data types
+ is_padding = False
+
+ if stream.consume('T{'):
+ value, align = __dtype_from_pep3118(
+ stream, is_subdtype=True)
+ elif stream.next in type_map_chars:
+ if stream.next == 'Z':
+ typechar = stream.advance(2)
+ else:
+ typechar = stream.advance(1)
+
+ is_padding = (typechar == 'x')
+ dtypechar = type_map[typechar]
+ if dtypechar in 'USV':
+ dtypechar += '%d' % itemsize
+ itemsize = 1
+ numpy_byteorder = {'@': '=', '^': '='}.get(
+ stream.byteorder, stream.byteorder)
+ value = dtype(numpy_byteorder + dtypechar)
+ align = value.alignment
+ elif stream.next in _pep3118_unsupported_map:
+ desc = _pep3118_unsupported_map[stream.next]
+ raise NotImplementedError(
+ "Unrepresentable PEP 3118 data type {!r} ({})"
+ .format(stream.next, desc))
+ else:
+ raise ValueError("Unknown PEP 3118 data type specifier %r" % stream.s)
+
+ #
+ # Native alignment may require padding
+ #
+ # Here we assume that the presence of a '@' character implies that the
+ # start of the array is *already* aligned.
+ #
+ extra_offset = 0
+ if stream.byteorder == '@':
+ start_padding = (-offset) % align
+ intra_padding = (-value.itemsize) % align
+
+ offset += start_padding
+
+ if intra_padding != 0:
+ if itemsize > 1 or (shape is not None and _prod(shape) > 1):
+ # Inject internal padding to the end of the sub-item
+ value = _add_trailing_padding(value, intra_padding)
+ else:
+ # We can postpone the injection of internal padding,
+ # as the item appears at most once
+ extra_offset += intra_padding
+
+ # Update common alignment
+ common_alignment = _lcm(align, common_alignment)
+
+ # Convert itemsize to sub-array
+ if itemsize != 1:
+ value = dtype((value, (itemsize,)))
+
+ # Sub-arrays (2)
+ if shape is not None:
+ value = dtype((value, shape))
+
+ # Field name
+ if stream.consume(':'):
+ name = stream.consume_until(':')
+ else:
+ name = None
+
+ if not (is_padding and name is None):
+ if name is not None and name in field_spec['names']:
+ raise RuntimeError("Duplicate field name '%s' in PEP3118 format"
+ % name)
+ field_spec['names'].append(name)
+ field_spec['formats'].append(value)
+ field_spec['offsets'].append(offset)
+
+ offset += value.itemsize
+ offset += extra_offset
+
+ field_spec['itemsize'] = offset
+
+ # extra final padding for aligned types
+ if stream.byteorder == '@':
+ field_spec['itemsize'] += (-offset) % common_alignment
+
+ # Check if this was a simple 1-item type, and unwrap it
+ if (field_spec['names'] == [None]
+ and field_spec['offsets'][0] == 0
+ and field_spec['itemsize'] == field_spec['formats'][0].itemsize
+ and not is_subdtype):
+ ret = field_spec['formats'][0]
+ else:
+ _fix_names(field_spec)
+ ret = dtype(field_spec)
+
+ # Finished
+ return ret, common_alignment
+
+def _fix_names(field_spec):
+ """ Replace names which are None with the next unused f%d name """
+ names = field_spec['names']
+ for i, name in enumerate(names):
+ if name is not None:
+ continue
+
+ j = 0
+ while True:
+ name = 'f{}'.format(j)
+ if name not in names:
+ break
+ j = j + 1
+ names[i] = name
+
+def _add_trailing_padding(value, padding):
+ """Inject the specified number of padding bytes at the end of a dtype"""
+ if value.fields is None:
+ field_spec = dict(
+ names=['f0'],
+ formats=[value],
+ offsets=[0],
+ itemsize=value.itemsize
+ )
+ else:
+ fields = value.fields
+ names = value.names
+ field_spec = dict(
+ names=names,
+ formats=[fields[name][0] for name in names],
+ offsets=[fields[name][1] for name in names],
+ itemsize=value.itemsize
+ )
+
+ field_spec['itemsize'] += padding
+ return dtype(field_spec)
+
+def _prod(a):
+ p = 1
+ for x in a:
+ p *= x
+ return p
+
+def _gcd(a, b):
+ """Calculate the greatest common divisor of a and b"""
+ while b:
+ a, b = b, a % b
+ return a
+
+def _lcm(a, b):
+ return a // _gcd(a, b) * b
+
+# Exception used in shares_memory()
+@set_module('numpy')
+class TooHardError(RuntimeError):
+ pass
+
+@set_module('numpy')
+class AxisError(ValueError, IndexError):
+ """ Axis supplied was invalid. """
+ def __init__(self, axis, ndim=None, msg_prefix=None):
+ # single-argument form just delegates to base class
+ if ndim is None and msg_prefix is None:
+ msg = axis
+
+ # do the string formatting here, to save work in the C code
+ else:
+ msg = ("axis {} is out of bounds for array of dimension {}"
+ .format(axis, ndim))
+ if msg_prefix is not None:
+ msg = "{}: {}".format(msg_prefix, msg)
+
+ super(AxisError, self).__init__(msg)
+
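+# Sketch of the two-argument form:
+#
+#     raise AxisError(axis=2, ndim=1)
+#     # AxisError: axis 2 is out of bounds for array of dimension 1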
+
+def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs):
+ """ Format the error message for when __array_ufunc__ gives up. """
+ args_string = ', '.join(['{!r}'.format(arg) for arg in inputs] +
+ ['{}={!r}'.format(k, v)
+ for k, v in kwargs.items()])
+ args = inputs + kwargs.get('out', ())
+ types_string = ', '.join(repr(type(arg).__name__) for arg in args)
+ return ('operand type(s) all returned NotImplemented from '
+ '__array_ufunc__({!r}, {!r}, {}): {}'
+ .format(ufunc, method, args_string, types_string))
+
+
+def array_function_errmsg_formatter(public_api, types):
+ """ Format the error message for when __array_ufunc__ gives up. """
+ func_name = '{}.{}'.format(public_api.__module__, public_api.__name__)
+ return ("no implementation found for '{}' on types that implement "
+ '__array_function__: {}'.format(func_name, list(types)))
+
+
+def _ufunc_doc_signature_formatter(ufunc):
+ """
+ Builds a signature string which resembles PEP 457
+
+ This is used to construct the first line of the docstring
+ """
+
+ # input arguments are simple
+ if ufunc.nin == 1:
+ in_args = 'x'
+ else:
+ in_args = ', '.join('x{}'.format(i+1) for i in range(ufunc.nin))
+
+ # output arguments are both keyword or positional
+ if ufunc.nout == 0:
+ out_args = ', /, out=()'
+ elif ufunc.nout == 1:
+ out_args = ', /, out=None'
+ else:
+ out_args = '[, {positional}], / [, out={default}]'.format(
+ positional=', '.join(
+ 'out{}'.format(i+1) for i in range(ufunc.nout)),
+ default=repr((None,)*ufunc.nout)
+ )
+
+ # keyword only args depend on whether this is a gufunc
+ kwargs = (
+ ", casting='same_kind'"
+ ", order='K'"
+ ", dtype=None"
+ ", subok=True"
+ "[, signature"
+ ", extobj]"
+ )
+ if ufunc.signature is None:
+ kwargs = ", where=True" + kwargs
+
+ # join all the parts together
+ return '{name}({in_args}{out_args}, *{kwargs})'.format(
+ name=ufunc.__name__,
+ in_args=in_args,
+ out_args=out_args,
+ kwargs=kwargs
+ )
+
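+# Sketch of the output for a plain binary ufunc such as np.add:
+#
+#     _ufunc_doc_signature_formatter(np.add)
+#     # -> "add(x1, x2, /, out=None, *, where=True, casting='same_kind',
+#     #     order='K', dtype=None, subok=True[, signature, extobj])"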
+
+def npy_ctypes_check(cls):
+ # determine if a class comes from ctypes, in order to work around
+ # a bug in the buffer protocol for those objects, bpo-10746
+ try:
+ # ctypes classes are new-style, so they have an __mro__. This probably fails
+ # for ctypes classes with multiple inheritance.
+ ctype_base = cls.__mro__[-2]
+ # right now, they're part of the _ctypes module
+ return 'ctypes' in ctype_base.__module__
+ except Exception:
+ return False
+
+
+class recursive(object):
+ '''
+ A decorator class for recursive nested functions.
+ Naive recursive nested functions hold a reference to themselves:
+
+ def outer(*args):
+ def stringify_leaky(arg0, *arg1):
+ if len(arg1) > 0:
+ return stringify_leaky(*arg1) # <- HERE
+ return str(arg0)
+ stringify_leaky(*args)
+
+ This design pattern creates a reference cycle that is difficult for a
+ garbage collector to resolve. The decorator class prevents the
+ cycle by passing the nested function in as an argument `self`:
+
+ def outer(*args):
+ @recursive
+ def stringify(self, arg0, *arg1):
+ if len(arg1) > 0:
+ return self(*arg1)
+ return str(arg0)
+ stringify(*args)
+
+ '''
+ def __init__(self, func):
+ self.func = func
+ def __call__(self, *args, **kwargs):
+ return self.func(self, *args, **kwargs)
+
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_internal.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/_internal.pyc
new file mode 100644
index 0000000..71188ad
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/_internal.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_methods.py b/project/venv/lib/python2.7/site-packages/numpy/core/_methods.py
new file mode 100644
index 0000000..33f6d01
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/_methods.py
@@ -0,0 +1,156 @@
+"""
+Array methods which are called by both the C-code for the method
+and the Python code for the NumPy-namespace function
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import warnings
+
+from numpy.core import multiarray as mu
+from numpy.core import umath as um
+from numpy.core.numeric import asanyarray
+from numpy.core import numerictypes as nt
+from numpy._globals import _NoValue
+
+# save those O(100) nanoseconds!
+umr_maximum = um.maximum.reduce
+umr_minimum = um.minimum.reduce
+umr_sum = um.add.reduce
+umr_prod = um.multiply.reduce
+umr_any = um.logical_or.reduce
+umr_all = um.logical_and.reduce
+
+# avoid keyword arguments to speed up parsing, saves about 15%-20% for very
+# small reductions
+def _amax(a, axis=None, out=None, keepdims=False,
+ initial=_NoValue):
+ return umr_maximum(a, axis, None, out, keepdims, initial)
+
+def _amin(a, axis=None, out=None, keepdims=False,
+ initial=_NoValue):
+ return umr_minimum(a, axis, None, out, keepdims, initial)
+
+def _sum(a, axis=None, dtype=None, out=None, keepdims=False,
+ initial=_NoValue):
+ return umr_sum(a, axis, dtype, out, keepdims, initial)
+
+def _prod(a, axis=None, dtype=None, out=None, keepdims=False,
+ initial=_NoValue):
+ return umr_prod(a, axis, dtype, out, keepdims, initial)
+
+def _any(a, axis=None, dtype=None, out=None, keepdims=False):
+ return umr_any(a, axis, dtype, out, keepdims)
+
+def _all(a, axis=None, dtype=None, out=None, keepdims=False):
+ return umr_all(a, axis, dtype, out, keepdims)
+
+def _count_reduce_items(arr, axis):
+ if axis is None:
+ axis = tuple(range(arr.ndim))
+ if not isinstance(axis, tuple):
+ axis = (axis,)
+ items = 1
+ for ax in axis:
+ items *= arr.shape[ax]
+ return items
+
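+# Sketch: for arr.shape == (2, 3, 4), axis=None counts 2*3*4 == 24 items,
+# while axis=(0, 2) counts 2*4 == 8.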
+def _mean(a, axis=None, dtype=None, out=None, keepdims=False):
+ arr = asanyarray(a)
+
+ is_float16_result = False
+ rcount = _count_reduce_items(arr, axis)
+ # Make this warning show up first
+ if rcount == 0:
+ warnings.warn("Mean of empty slice.", RuntimeWarning, stacklevel=2)
+
+ # Cast bool, unsigned int, and int to float64 by default
+ if dtype is None:
+ if issubclass(arr.dtype.type, (nt.integer, nt.bool_)):
+ dtype = mu.dtype('f8')
+ elif issubclass(arr.dtype.type, nt.float16):
+ dtype = mu.dtype('f4')
+ is_float16_result = True
+
+ ret = umr_sum(arr, axis, dtype, out, keepdims)
+ if isinstance(ret, mu.ndarray):
+ ret = um.true_divide(
+ ret, rcount, out=ret, casting='unsafe', subok=False)
+ if is_float16_result and out is None:
+ ret = arr.dtype.type(ret)
+ elif hasattr(ret, 'dtype'):
+ if is_float16_result:
+ ret = arr.dtype.type(ret / rcount)
+ else:
+ ret = ret.dtype.type(ret / rcount)
+ else:
+ ret = ret / rcount
+
+ return ret
+
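+# Sketch: booleans and integers are accumulated in float64, so
+#
+#     _mean(np.array([1, 2], dtype=np.int8))    # -> 1.5, summed in 'f8'
+#
+# rather than truncating to an integer mean.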
+def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
+ arr = asanyarray(a)
+
+ rcount = _count_reduce_items(arr, axis)
+ # Make this warning show up on top.
+ if ddof >= rcount:
+ warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning,
+ stacklevel=2)
+
+ # Cast bool, unsigned int, and int to float64 by default
+ if dtype is None and issubclass(arr.dtype.type, (nt.integer, nt.bool_)):
+ dtype = mu.dtype('f8')
+
+ # Compute the mean.
+ # Note that if dtype is not of inexact type then arraymean will
+ # not be either.
+ arrmean = umr_sum(arr, axis, dtype, keepdims=True)
+ if isinstance(arrmean, mu.ndarray):
+ arrmean = um.true_divide(
+ arrmean, rcount, out=arrmean, casting='unsafe', subok=False)
+ else:
+ arrmean = arrmean.dtype.type(arrmean / rcount)
+
+ # Compute sum of squared deviations from mean
+ # Note that x may not be inexact and that we need it to be an array,
+ # not a scalar.
+ x = asanyarray(arr - arrmean)
+ if issubclass(arr.dtype.type, nt.complexfloating):
+ x = um.multiply(x, um.conjugate(x), out=x).real
+ else:
+ x = um.multiply(x, x, out=x)
+ ret = umr_sum(x, axis, dtype, out, keepdims)
+
+ # Compute degrees of freedom and make sure it is not negative.
+ rcount = max([rcount - ddof, 0])
+
+ # divide by degrees of freedom
+ if isinstance(ret, mu.ndarray):
+ ret = um.true_divide(
+ ret, rcount, out=ret, casting='unsafe', subok=False)
+ elif hasattr(ret, 'dtype'):
+ ret = ret.dtype.type(ret / rcount)
+ else:
+ ret = ret / rcount
+
+ return ret
+
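+# The computation above is the textbook two-pass algorithm,
+#
+#     var = sum(|x - mean(x)|**2) / (n - ddof)
+#
+# with the mean formed first and the squared deviations summed second.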
+def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
+ ret = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
+ keepdims=keepdims)
+
+ if isinstance(ret, mu.ndarray):
+ ret = um.sqrt(ret, out=ret)
+ elif hasattr(ret, 'dtype'):
+ ret = ret.dtype.type(um.sqrt(ret))
+ else:
+ ret = um.sqrt(ret)
+
+ return ret
+
+def _ptp(a, axis=None, out=None, keepdims=False):
+ return um.subtract(
+ umr_maximum(a, axis, None, out, keepdims),
+ umr_minimum(a, axis, None, None, keepdims),
+ out
+ )
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_methods.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/_methods.pyc
new file mode 100644
index 0000000..c1d5c2f
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/_methods.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_multiarray_tests.so b/project/venv/lib/python2.7/site-packages/numpy/core/_multiarray_tests.so
new file mode 100755
index 0000000..b019305
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/_multiarray_tests.so differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_multiarray_umath.so b/project/venv/lib/python2.7/site-packages/numpy/core/_multiarray_umath.so
new file mode 100755
index 0000000..81bffb6
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/_multiarray_umath.so differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_operand_flag_tests.so b/project/venv/lib/python2.7/site-packages/numpy/core/_operand_flag_tests.so
new file mode 100755
index 0000000..93291dd
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/_operand_flag_tests.so differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_rational_tests.so b/project/venv/lib/python2.7/site-packages/numpy/core/_rational_tests.so
new file mode 100755
index 0000000..08a153a
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/_rational_tests.so differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_string_helpers.py b/project/venv/lib/python2.7/site-packages/numpy/core/_string_helpers.py
new file mode 100644
index 0000000..45e6a73
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/_string_helpers.py
@@ -0,0 +1,100 @@
+"""
+String-handling utilities to avoid locale-dependence.
+
+Used primarily to generate type name aliases.
+"""
+# "import string" is costly to import!
+# Construct the translation tables directly
+# "A" = chr(65), "a" = chr(97)
+_all_chars = [chr(_m) for _m in range(256)]
+_ascii_upper = _all_chars[65:65+26]
+_ascii_lower = _all_chars[97:97+26]
+LOWER_TABLE = "".join(_all_chars[:65] + _ascii_lower + _all_chars[65+26:])
+UPPER_TABLE = "".join(_all_chars[:97] + _ascii_upper + _all_chars[97+26:])
+
+
+def english_lower(s):
+ """ Apply English case rules to convert ASCII strings to all lower case.
+
+ This is an internal utility function to replace calls to str.lower() such
+ that we can avoid changing behavior with changing locales. In particular,
+ Turkish has distinct dotted and dotless variants of the Latin letter "I" in
+ both lowercase and uppercase. Thus, "I".lower() != "i" in a "tr" locale.
+
+ Parameters
+ ----------
+ s : str
+
+ Returns
+ -------
+ lowered : str
+
+ Examples
+ --------
+ >>> from numpy.core.numerictypes import english_lower
+ >>> english_lower('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
+ 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz0123456789_'
+ >>> english_lower('')
+ ''
+ """
+ lowered = s.translate(LOWER_TABLE)
+ return lowered
+
+
+def english_upper(s):
+ """ Apply English case rules to convert ASCII strings to all upper case.
+
+ This is an internal utility function to replace calls to str.upper() such
+ that we can avoid changing behavior with changing locales. In particular,
+ Turkish has distinct dotted and dotless variants of the Latin letter "I" in
+ both lowercase and uppercase. Thus, "i".upper() != "I" in a "tr" locale.
+
+ Parameters
+ ----------
+ s : str
+
+ Returns
+ -------
+ uppered : str
+
+ Examples
+ --------
+ >>> from numpy.core.numerictypes import english_upper
+ >>> english_upper('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
+ 'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
+ >>> english_upper('')
+ ''
+ """
+ uppered = s.translate(UPPER_TABLE)
+ return uppered
+
+
+def english_capitalize(s):
+ """ Apply English case rules to convert the first character of an ASCII
+ string to upper case.
+
+ This is an internal utility function to replace calls to str.capitalize()
+ such that we can avoid changing behavior with changing locales.
+
+ Parameters
+ ----------
+ s : str
+
+ Returns
+ -------
+ capitalized : str
+
+ Examples
+ --------
+ >>> from numpy.core.numerictypes import english_capitalize
+ >>> english_capitalize('int8')
+ 'Int8'
+ >>> english_capitalize('Int8')
+ 'Int8'
+ >>> english_capitalize('')
+ ''
+ """
+ if s:
+ return english_upper(s[0]) + s[1:]
+ else:
+ return s
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_string_helpers.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/_string_helpers.pyc
new file mode 100644
index 0000000..d0d1785
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/_string_helpers.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_struct_ufunc_tests.so b/project/venv/lib/python2.7/site-packages/numpy/core/_struct_ufunc_tests.so
new file mode 100755
index 0000000..a29a741
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/_struct_ufunc_tests.so differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_type_aliases.py b/project/venv/lib/python2.7/site-packages/numpy/core/_type_aliases.py
new file mode 100644
index 0000000..d6e1a1f
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/_type_aliases.py
@@ -0,0 +1,282 @@
+"""
+For backward compatibility, numpy has a very large number of different naming
+conventions for the scalar types (those subclassing from `numpy.generic`).
+This file produces a convoluted set of dictionaries mapping names to types,
+and sometimes other mappings too.
+
+.. data:: allTypes
+ A dictionary of names to types that will be exposed as attributes through
+ ``np.core.numerictypes.*``
+
+.. data:: sctypeDict
+ Similar to `allTypes`, but maps a broader set of aliases to their types.
+
+.. data:: sctypeNA
+ NumArray-compatible names for the scalar types. Contains not only
+ ``name: type`` mappings, but ``char: name`` mappings too.
+
+ .. deprecated:: 1.16
+
+.. data:: sctypes
+ A dictionary keyed by a "type group" string, providing a list of types
+ under that group.
+
+"""
+import warnings
+import sys
+
+from numpy.compat import unicode
+from numpy._globals import VisibleDeprecationWarning
+from numpy.core._string_helpers import english_lower, english_capitalize
+from numpy.core.multiarray import typeinfo, dtype
+from numpy.core._dtype import _kind_name
+
+
+sctypeDict = {} # Contains all leaf-node scalar types with aliases
+class TypeNADict(dict):
+ def __getitem__(self, key):
+ # 2018-06-24, 1.16
+ warnings.warn('sctypeNA and typeNA will be removed in v1.18 '
+ 'of numpy', VisibleDeprecationWarning, stacklevel=2)
+ return dict.__getitem__(self, key)
+ def get(self, key, default=None):
+ # 2018-06-24, 1.16
+ warnings.warn('sctypeNA and typeNA will be removed in v1.18 '
+ 'of numpy', VisibleDeprecationWarning, stacklevel=2)
+ return dict.get(self, key, default)
+
+sctypeNA = TypeNADict()  # Contains all leaf-node types -> numarray type equivalences
+allTypes = {} # Collect the types we will add to the module
+
+
+# separate the actual type info from the abstract base classes
+_abstract_types = {}
+_concrete_typeinfo = {}
+for k, v in typeinfo.items():
+ # make all the keys lowercase too
+ k = english_lower(k)
+ if isinstance(v, type):
+ _abstract_types[k] = v
+ else:
+ _concrete_typeinfo[k] = v
+
+_concrete_types = {v.type for k, v in _concrete_typeinfo.items()}
+
+
+def _bits_of(obj):
+ try:
+ info = next(v for v in _concrete_typeinfo.values() if v.type is obj)
+ except StopIteration:
+ if obj in _abstract_types.values():
+ raise ValueError("Cannot count the bits of an abstract type")
+
+ # some third-party type - make a best-guess
+ return dtype(obj).itemsize * 8
+ else:
+ return info.bits
+
+
+def bitname(obj):
+ """Return a bit-width name for a given type object"""
+ bits = _bits_of(obj)
+ dt = dtype(obj)
+ char = dt.kind
+ base = _kind_name(dt)
+
+ if base == 'object':
+ bits = 0
+
+ if bits != 0:
+ char = "%s%d" % (char, bits // 8)
+
+ return base, bits, char
+
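+# Sketch: bitname(np.float64) returns ('float', 64, 'f8'); for object
+# dtypes the bit count is forced to 0, so the char keeps no width suffix.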
+
+def _add_types():
+ for name, info in _concrete_typeinfo.items():
+ # define C-name and insert typenum and typechar references also
+ allTypes[name] = info.type
+ sctypeDict[name] = info.type
+ sctypeDict[info.char] = info.type
+ sctypeDict[info.num] = info.type
+
+ for name, cls in _abstract_types.items():
+ allTypes[name] = cls
+_add_types()
+
+# This is the priority order used to assign the bit-sized NPY_INTxx names, which
+# must match the order in npy_common.h in order for NPY_INTxx and np.intxx to be
+# consistent.
+# If two C types have the same size, then the earliest one in this list is used
+# as the sized name.
+_int_ctypes = ['long', 'longlong', 'int', 'short', 'byte']
+_uint_ctypes = list('u' + t for t in _int_ctypes)
+
+def _add_aliases():
+ for name, info in _concrete_typeinfo.items():
+ # these are handled by _add_integer_aliases
+ if name in _int_ctypes or name in _uint_ctypes:
+ continue
+
+ # insert bit-width version for this class (if relevant)
+ base, bit, char = bitname(info.type)
+
+ myname = "%s%d" % (base, bit)
+
+ # ensure that (c)longdouble does not overwrite the aliases assigned to
+ # (c)double
+ if name in ('longdouble', 'clongdouble') and myname in allTypes:
+ continue
+
+ base_capitalize = english_capitalize(base)
+ if base == 'complex':
+ na_name = '%s%d' % (base_capitalize, bit//2)
+ elif base == 'bool':
+ na_name = base_capitalize
+ else:
+ na_name = "%s%d" % (base_capitalize, bit)
+
+ allTypes[myname] = info.type
+
+ # add mapping for both the bit name and the numarray name
+ sctypeDict[myname] = info.type
+ sctypeDict[na_name] = info.type
+
+ # add forward, reverse, and string mapping to numarray
+ sctypeNA[na_name] = info.type
+ sctypeNA[info.type] = na_name
+ sctypeNA[info.char] = na_name
+
+ sctypeDict[char] = info.type
+ sctypeNA[char] = na_name
+_add_aliases()
+
+def _add_integer_aliases():
+ seen_bits = set()
+ for i_ctype, u_ctype in zip(_int_ctypes, _uint_ctypes):
+ i_info = _concrete_typeinfo[i_ctype]
+ u_info = _concrete_typeinfo[u_ctype]
+ bits = i_info.bits # same for both
+
+ for info, charname, intname, Intname in [
+ (i_info,'i%d' % (bits//8,), 'int%d' % bits, 'Int%d' % bits),
+ (u_info,'u%d' % (bits//8,), 'uint%d' % bits, 'UInt%d' % bits)]:
+ if bits not in seen_bits:
+ # sometimes two different types have the same number of bits
+ # if so, the one iterated over first takes precedence
+ allTypes[intname] = info.type
+ sctypeDict[intname] = info.type
+ sctypeDict[Intname] = info.type
+ sctypeDict[charname] = info.type
+ sctypeNA[Intname] = info.type
+ sctypeNA[charname] = info.type
+ sctypeNA[info.type] = Intname
+ sctypeNA[info.char] = Intname
+
+ seen_bits.add(bits)
+
+_add_integer_aliases()
+
+# We use these later
+void = allTypes['void']
+
+#
+# Rework the Python names (so that float and complex and int are consistent
+# with Python usage)
+#
+def _set_up_aliases():
+ type_pairs = [('complex_', 'cdouble'),
+ ('int0', 'intp'),
+ ('uint0', 'uintp'),
+ ('single', 'float'),
+ ('csingle', 'cfloat'),
+ ('singlecomplex', 'cfloat'),
+ ('float_', 'double'),
+ ('intc', 'int'),
+ ('uintc', 'uint'),
+ ('int_', 'long'),
+ ('uint', 'ulong'),
+ ('cfloat', 'cdouble'),
+ ('longfloat', 'longdouble'),
+ ('clongfloat', 'clongdouble'),
+ ('longcomplex', 'clongdouble'),
+ ('bool_', 'bool'),
+ ('bytes_', 'string'),
+ ('string_', 'string'),
+ ('unicode_', 'unicode'),
+ ('object_', 'object')]
+ if sys.version_info[0] >= 3:
+ type_pairs.extend([('str_', 'unicode')])
+ else:
+ type_pairs.extend([('str_', 'string')])
+ for alias, t in type_pairs:
+ allTypes[alias] = allTypes[t]
+ sctypeDict[alias] = sctypeDict[t]
+ # Remove aliases overriding python types and modules
+ to_remove = ['ulong', 'object', 'int', 'float',
+ 'complex', 'bool', 'string', 'datetime', 'timedelta']
+ if sys.version_info[0] >= 3:
+ to_remove.extend(['bytes', 'str'])
+ else:
+ to_remove.extend(['unicode', 'long'])
+
+ for t in to_remove:
+ try:
+ del allTypes[t]
+ del sctypeDict[t]
+ except KeyError:
+ pass
+_set_up_aliases()
+
+
+sctypes = {'int': [],
+ 'uint':[],
+ 'float':[],
+ 'complex':[],
+ 'others':[bool, object, bytes, unicode, void]}
+
+def _add_array_type(typename, bits):
+ try:
+ t = allTypes['%s%d' % (typename, bits)]
+ except KeyError:
+ pass
+ else:
+ sctypes[typename].append(t)
+
+def _set_array_types():
+ ibytes = [1, 2, 4, 8, 16, 32, 64]
+ fbytes = [2, 4, 8, 10, 12, 16, 32, 64]
+ for bytes in ibytes:
+ bits = 8*bytes
+ _add_array_type('int', bits)
+ _add_array_type('uint', bits)
+ for bytes in fbytes:
+ bits = 8*bytes
+ _add_array_type('float', bits)
+ _add_array_type('complex', 2*bits)
+ _gi = dtype('p')
+ if _gi.type not in sctypes['int']:
+ indx = 0
+ sz = _gi.itemsize
+ _lst = sctypes['int']
+ while (indx < len(_lst) and sz >= _lst[indx](0).itemsize):
+ indx += 1
+ sctypes['int'].insert(indx, _gi.type)
+ sctypes['uint'].insert(indx, dtype('P').type)
+_set_array_types()
+
+
+# Add additional strings to the sctypeDict
+_toadd = ['int', 'float', 'complex', 'bool', 'object']
+if sys.version_info[0] >= 3:
+ _toadd.extend(['str', 'bytes', ('a', 'bytes_')])
+else:
+ _toadd.extend(['string', ('str', 'string_'), 'unicode', ('a', 'string_')])
+
+for name in _toadd:
+ if isinstance(name, tuple):
+ sctypeDict[name[0]] = allTypes[name[1]]
+ else:
+ sctypeDict[name] = allTypes['%s_' % name]
+
+del _toadd, name
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_type_aliases.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/_type_aliases.pyc
new file mode 100644
index 0000000..adc74dc
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/_type_aliases.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/_umath_tests.so b/project/venv/lib/python2.7/site-packages/numpy/core/_umath_tests.so
new file mode 100755
index 0000000..0b30cd2
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/_umath_tests.so differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/arrayprint.py b/project/venv/lib/python2.7/site-packages/numpy/core/arrayprint.py
new file mode 100644
index 0000000..6a71de2
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/arrayprint.py
@@ -0,0 +1,1631 @@
+"""Array printing function
+
+$Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $
+
+"""
+from __future__ import division, absolute_import, print_function
+
+__all__ = ["array2string", "array_str", "array_repr", "set_string_function",
+ "set_printoptions", "get_printoptions", "printoptions",
+ "format_float_positional", "format_float_scientific"]
+__docformat__ = 'restructuredtext'
+
+#
+# Written by Konrad Hinsen
+# last revision: 1996-3-13
+# modified by Jim Hugunin 1997-3-3 for repr's and str's (and other details)
+# and by Perry Greenfield 2000-4-1 for numarray
+# and by Travis Oliphant 2005-8-22 for numpy
+
+
+# Note: Both scalartypes.c.src and arrayprint.py implement strs for numpy
+# scalars but for different purposes. scalartypes.c.src has str/reprs for when
+# the scalar is printed on its own, while arrayprint.py has strs for when
+# scalars are printed inside an ndarray. Only the latter strs are currently
+# user-customizable.
+
+import sys
+import functools
+import numbers
+if sys.version_info[0] >= 3:
+ try:
+ from _thread import get_ident
+ except ImportError:
+ from _dummy_thread import get_ident
+else:
+ try:
+ from thread import get_ident
+ except ImportError:
+ from dummy_thread import get_ident
+
+import numpy as np
+from . import numerictypes as _nt
+from .umath import absolute, not_equal, isnan, isinf, isfinite, isnat
+from . import multiarray
+from .multiarray import (array, dragon4_positional, dragon4_scientific,
+ datetime_as_string, datetime_data, ndarray,
+ set_legacy_print_mode)
+from .fromnumeric import ravel, any
+from .numeric import concatenate, asarray, errstate
+from .numerictypes import (longlong, intc, int_, float_, complex_, bool_,
+ flexible)
+from .overrides import array_function_dispatch, set_module
+import warnings
+import contextlib
+
+_format_options = {
+ 'edgeitems': 3, # repr N leading and trailing items of each dimension
+ 'threshold': 1000, # total item count above which array summarization triggers
+ 'floatmode': 'maxprec',
+ 'precision': 8, # precision of floating point representations
+ 'suppress': False, # suppress printing small floating values in exp format
+ 'linewidth': 75,
+ 'nanstr': 'nan',
+ 'infstr': 'inf',
+ 'sign': '-',
+ 'formatter': None,
+ 'legacy': False}
+
+def _make_options_dict(precision=None, threshold=None, edgeitems=None,
+ linewidth=None, suppress=None, nanstr=None, infstr=None,
+ sign=None, formatter=None, floatmode=None, legacy=None):
+ """ make a dictionary out of the non-None arguments, plus sanity checks """
+
+ options = {k: v for k, v in locals().items() if v is not None}
+
+ if suppress is not None:
+ options['suppress'] = bool(suppress)
+
+ modes = ['fixed', 'unique', 'maxprec', 'maxprec_equal']
+ if floatmode not in modes + [None]:
+ raise ValueError("floatmode option must be one of " +
+ ", ".join('"{}"'.format(m) for m in modes))
+
+ if sign not in [None, '-', '+', ' ']:
+ raise ValueError("sign option must be one of ' ', '+', or '-'")
+
+ if legacy not in [None, False, '1.13']:
+ warnings.warn("legacy printing option can currently only be '1.13' or "
+ "`False`", stacklevel=3)
+ if threshold is not None:
+ # forbid the bad threshold arg suggested by stack overflow, gh-12351
+ if not isinstance(threshold, numbers.Number) or np.isnan(threshold):
+ raise ValueError("threshold must be numeric and non-NAN, try "
+ "sys.maxsize for untruncated representation")
+ return options
+
+
+@set_module('numpy')
+def set_printoptions(precision=None, threshold=None, edgeitems=None,
+ linewidth=None, suppress=None, nanstr=None, infstr=None,
+ formatter=None, sign=None, floatmode=None, **kwarg):
+ """
+ Set printing options.
+
+ These options determine the way floating point numbers, arrays and
+ other NumPy objects are displayed.
+
+ Parameters
+ ----------
+ precision : int or None, optional
+ Number of digits of precision for floating point output (default 8).
+ May be `None` if `floatmode` is not `fixed`, to print as many digits as
+ necessary to uniquely specify the value.
+ threshold : int, optional
+ Total number of array elements which trigger summarization
+ rather than full repr (default 1000).
+ edgeitems : int, optional
+ Number of array items in summary at beginning and end of
+ each dimension (default 3).
+ linewidth : int, optional
+ The number of characters per line for the purpose of inserting
+ line breaks (default 75).
+ suppress : bool, optional
+ If True, always print floating point numbers using fixed point
+ notation, in which case numbers equal to zero in the current precision
+ will print as zero. If False, then scientific notation is used when
+ absolute value of the smallest number is < 1e-4 or the ratio of the
+ maximum absolute value to the minimum is > 1e3. The default is False.
+ nanstr : str, optional
+ String representation of floating point not-a-number (default nan).
+ infstr : str, optional
+ String representation of floating point infinity (default inf).
+ sign : string, either '-', '+', or ' ', optional
+ Controls printing of the sign of floating-point types. If '+', always
+ print the sign of positive values. If ' ', always prints a space
+ (whitespace character) in the sign position of positive values. If
+ '-', omit the sign character of positive values. (default '-')
+ formatter : dict of callables, optional
+ If not None, the keys should indicate the type(s) that the respective
+ formatting function applies to. Callables should return a string.
+ Types that are not specified (by their corresponding keys) are handled
+ by the default formatters. Individual types for which a formatter
+ can be set are:
+
+ - 'bool'
+ - 'int'
+ - 'timedelta' : a `numpy.timedelta64`
+ - 'datetime' : a `numpy.datetime64`
+ - 'float'
+ - 'longfloat' : 128-bit floats
+ - 'complexfloat'
+ - 'longcomplexfloat' : composed of two 128-bit floats
+ - 'numpystr' : types `numpy.string_` and `numpy.unicode_`
+ - 'object' : `np.object_` arrays
+ - 'str' : all other strings
+
+ Other keys that can be used to set a group of types at once are:
+
+ - 'all' : sets all types
+ - 'int_kind' : sets 'int'
+ - 'float_kind' : sets 'float' and 'longfloat'
+ - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
+ - 'str_kind' : sets 'str' and 'numpystr'
+ floatmode : str, optional
+ Controls the interpretation of the `precision` option for
+ floating-point types. Can take the following values:
+
+ * 'fixed': Always print exactly `precision` fractional digits,
+ even if this would print more or fewer digits than
+ necessary to specify the value uniquely.
+ * 'unique': Print the minimum number of fractional digits necessary
+ to represent each value uniquely. Different elements may
+ have a different number of digits. The value of the
+ `precision` option is ignored.
+ * 'maxprec': Print at most `precision` fractional digits, but if
+ an element can be uniquely represented with fewer digits
+ only print it with that many.
+ * 'maxprec_equal': Print at most `precision` fractional digits,
+ but if every element in the array can be uniquely
+ represented with an equal number of fewer digits, use that
+ many digits for all elements.
+ legacy : string or `False`, optional
+ If set to the string `'1.13'` enables 1.13 legacy printing mode. This
+ approximates numpy 1.13 print output by including a space in the sign
+ position of floats and different behavior for 0d arrays. If set to
+ `False`, disables legacy mode. Unrecognized strings will be ignored
+ with a warning for forward compatibility.
+
+ .. versionadded:: 1.14.0
+
+ See Also
+ --------
+ get_printoptions, set_string_function, array2string
+
+ Notes
+ -----
+ `formatter` is always reset with a call to `set_printoptions`.
+
+ Examples
+ --------
+ Floating point precision can be set:
+
+ >>> np.set_printoptions(precision=4)
+ >>> print(np.array([1.123456789]))
+ [1.1235]
+
+ Long arrays can be summarised:
+
+ >>> np.set_printoptions(threshold=5)
+ >>> print(np.arange(10))
+ [0 1 2 ... 7 8 9]
+
+ Small results can be suppressed:
+
+ >>> eps = np.finfo(float).eps
+ >>> x = np.arange(4.)
+ >>> x**2 - (x + eps)**2
+ array([-4.9304e-32, -4.4409e-16, 0.0000e+00, 0.0000e+00])
+ >>> np.set_printoptions(suppress=True)
+ >>> x**2 - (x + eps)**2
+ array([-0., -0., 0., 0.])
+
+ A custom formatter can be used to display array elements as desired:
+
+ >>> np.set_printoptions(formatter={'all':lambda x: 'int: '+str(-x)})
+ >>> x = np.arange(3)
+ >>> x
+ array([int: 0, int: -1, int: -2])
+ >>> np.set_printoptions() # formatter gets reset
+ >>> x
+ array([0, 1, 2])
+
+ To put back the default options, you can use:
+
+ >>> np.set_printoptions(edgeitems=3,infstr='inf',
+ ... linewidth=75, nanstr='nan', precision=8,
+ ... suppress=False, threshold=1000, formatter=None)
+ """
+ legacy = kwarg.pop('legacy', None)
+ if kwarg:
+ msg = "set_printoptions() got unexpected keyword argument '{}'"
+ raise TypeError(msg.format(kwarg.popitem()[0]))
+
+ opt = _make_options_dict(precision, threshold, edgeitems, linewidth,
+ suppress, nanstr, infstr, sign, formatter,
+ floatmode, legacy)
+ # formatter is always reset
+ opt['formatter'] = formatter
+ _format_options.update(opt)
+
+ # set the C variable for legacy mode
+ if _format_options['legacy'] == '1.13':
+ set_legacy_print_mode(113)
+ # reset the sign option in legacy mode to avoid confusion
+ _format_options['sign'] = '-'
+ elif _format_options['legacy'] is False:
+ set_legacy_print_mode(0)
+
+
+@set_module('numpy')
+def get_printoptions():
+ """
+ Return the current print options.
+
+ Returns
+ -------
+ print_opts : dict
+ Dictionary of current print options with keys
+
+ - precision : int
+ - threshold : int
+ - edgeitems : int
+ - linewidth : int
+ - suppress : bool
+ - nanstr : str
+ - infstr : str
+ - formatter : dict of callables
+ - sign : str
+
+ For a full description of these options, see `set_printoptions`.
+
+ See Also
+ --------
+ set_printoptions, set_string_function
+
+ """
+ return _format_options.copy()
+
+
+@set_module('numpy')
+@contextlib.contextmanager
+def printoptions(*args, **kwargs):
+ """Context manager for setting print options.
+
+ Set print options for the scope of the `with` block, and restore the old
+ options at the end. See `set_printoptions` for the full description of
+ available options.
+
+ Examples
+ --------
+
+ >>> with np.printoptions(precision=2):
+ ... print(np.array([2.0]) / 3)
+ [0.67]
+
+ The `as`-clause of the `with`-statement gives the current print options:
+
+ >>> with np.printoptions(precision=2) as opts:
+ ... assert_equal(opts, np.get_printoptions())
+
+ See Also
+ --------
+ set_printoptions, get_printoptions
+
+ """
+ opts = np.get_printoptions()
+ try:
+ np.set_printoptions(*args, **kwargs)
+ yield np.get_printoptions()
+ finally:
+ np.set_printoptions(**opts)
+
+
+def _leading_trailing(a, edgeitems, index=()):
+ """
+ Keep only the N-D corners (leading and trailing edges) of an array.
+
+ Should be passed a base-class ndarray, since it makes no guarantees about
+ preserving subclasses.
+ """
+ axis = len(index)
+ if axis == a.ndim:
+ return a[index]
+
+ if a.shape[axis] > 2*edgeitems:
+ return concatenate((
+ _leading_trailing(a, edgeitems, index + np.index_exp[ :edgeitems]),
+ _leading_trailing(a, edgeitems, index + np.index_exp[-edgeitems:])
+ ), axis=axis)
+ else:
+ return _leading_trailing(a, edgeitems, index + np.index_exp[:])
+
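+# Sketch: with edgeitems=2 a 1-D array of ten elements is reduced to the
+# concatenation of its first two and last two entries; higher-dimensional
+# input gets the same trimming applied recursively along every axis.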
+
+def _object_format(o):
+ """ Object arrays containing lists should be printed unambiguously """
+ if type(o) is list:
+ fmt = 'list({!r})'
+ else:
+ fmt = '{!r}'
+ return fmt.format(o)
+
+def repr_format(x):
+ return repr(x)
+
+def str_format(x):
+ return str(x)
+
+def _get_formatdict(data, **opt):
+ prec, fmode = opt['precision'], opt['floatmode']
+ supp, sign = opt['suppress'], opt['sign']
+ legacy = opt['legacy']
+
+ # wrapped in lambdas to avoid taking a code path with the wrong type of data
+ formatdict = {
+ 'bool': lambda: BoolFormat(data),
+ 'int': lambda: IntegerFormat(data),
+ 'float': lambda:
+ FloatingFormat(data, prec, fmode, supp, sign, legacy=legacy),
+ 'longfloat': lambda:
+ FloatingFormat(data, prec, fmode, supp, sign, legacy=legacy),
+ 'complexfloat': lambda:
+ ComplexFloatingFormat(data, prec, fmode, supp, sign, legacy=legacy),
+ 'longcomplexfloat': lambda:
+ ComplexFloatingFormat(data, prec, fmode, supp, sign, legacy=legacy),
+ 'datetime': lambda: DatetimeFormat(data, legacy=legacy),
+ 'timedelta': lambda: TimedeltaFormat(data),
+ 'object': lambda: _object_format,
+ 'void': lambda: str_format,
+ 'numpystr': lambda: repr_format,
+ 'str': lambda: str}
+
+ # we need to wrap values in `formatter` in a lambda, so that the interface
+ # is the same as the above values.
+ def indirect(x):
+ return lambda: x
+
+ formatter = opt['formatter']
+ if formatter is not None:
+ fkeys = [k for k in formatter.keys() if formatter[k] is not None]
+ if 'all' in fkeys:
+ for key in formatdict.keys():
+ formatdict[key] = indirect(formatter['all'])
+ if 'int_kind' in fkeys:
+ for key in ['int']:
+ formatdict[key] = indirect(formatter['int_kind'])
+ if 'float_kind' in fkeys:
+ for key in ['float', 'longfloat']:
+ formatdict[key] = indirect(formatter['float_kind'])
+ if 'complex_kind' in fkeys:
+ for key in ['complexfloat', 'longcomplexfloat']:
+ formatdict[key] = indirect(formatter['complex_kind'])
+ if 'str_kind' in fkeys:
+ for key in ['numpystr', 'str']:
+ formatdict[key] = indirect(formatter['str_kind'])
+ for key in formatdict.keys():
+ if key in fkeys:
+ formatdict[key] = indirect(formatter[key])
+
+ return formatdict
+
+def _get_format_function(data, **options):
+ """
+ find the right formatting function for the dtype_
+ """
+ dtype_ = data.dtype
+ dtypeobj = dtype_.type
+ formatdict = _get_formatdict(data, **options)
+ if issubclass(dtypeobj, _nt.bool_):
+ return formatdict['bool']()
+ elif issubclass(dtypeobj, _nt.integer):
+ if issubclass(dtypeobj, _nt.timedelta64):
+ return formatdict['timedelta']()
+ else:
+ return formatdict['int']()
+ elif issubclass(dtypeobj, _nt.floating):
+ if issubclass(dtypeobj, _nt.longfloat):
+ return formatdict['longfloat']()
+ else:
+ return formatdict['float']()
+ elif issubclass(dtypeobj, _nt.complexfloating):
+ if issubclass(dtypeobj, _nt.clongfloat):
+ return formatdict['longcomplexfloat']()
+ else:
+ return formatdict['complexfloat']()
+ elif issubclass(dtypeobj, (_nt.unicode_, _nt.string_)):
+ return formatdict['numpystr']()
+ elif issubclass(dtypeobj, _nt.datetime64):
+ return formatdict['datetime']()
+ elif issubclass(dtypeobj, _nt.object_):
+ return formatdict['object']()
+ elif issubclass(dtypeobj, _nt.void):
+ if dtype_.names is not None:
+ return StructuredVoidFormat.from_data(data, **options)
+ else:
+ return formatdict['void']()
+ else:
+ return formatdict['numpystr']()
+
+
+def _recursive_guard(fillvalue='...'):
+ """
+ Like the python 3.2 reprlib.recursive_repr, but forwards *args and **kwargs
+
+ Decorates a function such that if it calls itself with the same first
+ argument, it returns `fillvalue` instead of recursing.
+
+ Largely copied from reprlib.recursive_repr
+ """
+
+ def decorating_function(f):
+ repr_running = set()
+
+ @functools.wraps(f)
+ def wrapper(self, *args, **kwargs):
+ key = id(self), get_ident()
+ if key in repr_running:
+ return fillvalue
+ repr_running.add(key)
+ try:
+ return f(self, *args, **kwargs)
+ finally:
+ repr_running.discard(key)
+
+ return wrapper
+
+ return decorating_function
+
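+# Sketch: a self-containing object array reprs without infinite recursion,
+#
+#     a = np.empty(1, dtype=object)
+#     a[0] = a
+#     repr(a)    # the inner self-reference is rendered as '...'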
+
+# gracefully handle recursive calls, when object arrays contain themselves
+@_recursive_guard()
+def _array2string(a, options, separator=' ', prefix=""):
+ # The formatter __init__s in _get_format_function cannot deal with
+ # subclasses yet, and we also need to avoid recursion issues in
+ # _formatArray with subclasses which return 0d arrays in place of scalars
+ data = asarray(a)
+ if a.shape == ():
+ a = data
+
+ if a.size > options['threshold']:
+ summary_insert = "..."
+ data = _leading_trailing(data, options['edgeitems'])
+ else:
+ summary_insert = ""
+
+ # find the right formatting function for the array
+ format_function = _get_format_function(data, **options)
+
+ # skip over "["
+ next_line_prefix = " "
+ # skip over array(
+ next_line_prefix += " "*len(prefix)
+
+ lst = _formatArray(a, format_function, options['linewidth'],
+ next_line_prefix, separator, options['edgeitems'],
+ summary_insert, options['legacy'])
+ return lst
+
+
+def _array2string_dispatcher(
+ a, max_line_width=None, precision=None,
+ suppress_small=None, separator=None, prefix=None,
+ style=None, formatter=None, threshold=None,
+ edgeitems=None, sign=None, floatmode=None, suffix=None,
+ **kwarg):
+ return (a,)
+
+
+@array_function_dispatch(_array2string_dispatcher, module='numpy')
+def array2string(a, max_line_width=None, precision=None,
+ suppress_small=None, separator=' ', prefix="",
+ style=np._NoValue, formatter=None, threshold=None,
+ edgeitems=None, sign=None, floatmode=None, suffix="",
+ **kwarg):
+ """
+ Return a string representation of an array.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ max_line_width : int, optional
+ The maximum number of columns the string should span. Newline
+ characters split the string appropriately after array elements.
+ precision : int or None, optional
+ Floating point precision. Default is the current printing
+ precision (usually 8), which can be altered using `set_printoptions`.
+ suppress_small : bool, optional
+ Represent very small numbers as zero. A number is "very small" if it
+ is smaller than the current printing precision.
+ separator : str, optional
+ Inserted between elements.
+ prefix : str, optional
+ suffix : str, optional
+ The lengths of the prefix and suffix strings are used to respectively
+ align and wrap the output. An array is typically printed as::
+
+ prefix + array2string(a) + suffix
+
+ The output is left-padded by the length of the prefix string, and
+ wrapping is forced at the column ``max_line_width - len(suffix)``.
+ Note that the contents of the prefix and suffix strings are not
+ included in the output.
+ style : _NoValue, optional
+ Has no effect, do not use.
+
+ .. deprecated:: 1.14.0
+ formatter : dict of callables, optional
+ If not None, the keys should indicate the type(s) that the respective
+ formatting function applies to. Callables should return a string.
+ Types that are not specified (by their corresponding keys) are handled
+ by the default formatters. Individual types for which a formatter
+ can be set are:
+
+ - 'bool'
+ - 'int'
+ - 'timedelta' : a `numpy.timedelta64`
+ - 'datetime' : a `numpy.datetime64`
+ - 'float'
+ - 'longfloat' : 128-bit floats
+ - 'complexfloat'
+ - 'longcomplexfloat' : composed of two 128-bit floats
+ - 'void' : type `numpy.void`
+ - 'numpystr' : types `numpy.string_` and `numpy.unicode_`
+ - 'str' : all other strings
+
+ Other keys that can be used to set a group of types at once are:
+
+ - 'all' : sets all types
+ - 'int_kind' : sets 'int'
+ - 'float_kind' : sets 'float' and 'longfloat'
+ - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
+ - 'str_kind' : sets 'str' and 'numpystr'
+ threshold : int, optional
+ Total number of array elements which trigger summarization
+ rather than full repr.
+ edgeitems : int, optional
+ Number of array items in summary at beginning and end of
+ each dimension.
+ sign : string, either '-', '+', or ' ', optional
+ Controls printing of the sign of floating-point types. If '+', always
+ print the sign of positive values. If ' ', always prints a space
+ (whitespace character) in the sign position of positive values. If
+ '-', omit the sign character of positive values.
+ floatmode : str, optional
+ Controls the interpretation of the `precision` option for
+ floating-point types. Can take the following values:
+
+ - 'fixed': Always print exactly `precision` fractional digits,
+ even if this would print more or fewer digits than
+ necessary to specify the value uniquely.
+ - 'unique': Print the minimum number of fractional digits necessary
+ to represent each value uniquely. Different elements may
+ have a different number of digits. The value of the
+ `precision` option is ignored.
+ - 'maxprec': Print at most `precision` fractional digits, but if
+ an element can be uniquely represented with fewer digits
+ only print it with that many.
+ - 'maxprec_equal': Print at most `precision` fractional digits,
+ but if every element in the array can be uniquely
+ represented with an equal number of fewer digits, use that
+ many digits for all elements.
+ legacy : string or `False`, optional
+ If set to the string `'1.13'` enables 1.13 legacy printing mode. This
+ approximates numpy 1.13 print output by including a space in the sign
+ position of floats and different behavior for 0d arrays. If set to
+ `False`, disables legacy mode. Unrecognized strings will be ignored
+ with a warning for forward compatibility.
+
+ .. versionadded:: 1.14.0
+
+ Returns
+ -------
+ array_str : str
+ String representation of the array.
+
+ Raises
+ ------
+ TypeError
+ if a callable in `formatter` does not return a string.
+
+ See Also
+ --------
+ array_str, array_repr, set_printoptions, get_printoptions
+
+ Notes
+ -----
+ If a formatter is specified for a certain type, the `precision` keyword is
+ ignored for that type.
+
+ This is a very flexible function; `array_repr` and `array_str` are using
+ `array2string` internally so keywords with the same name should work
+ identically in all three functions.
+
+ Examples
+ --------
+ >>> x = np.array([1e-16,1,2,3])
+ >>> print(np.array2string(x, precision=2, separator=',',
+ ... suppress_small=True))
+ [ 0., 1., 2., 3.]
+
+ >>> x = np.arange(3.)
+ >>> np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x})
+ '[0.00 1.00 2.00]'
+
+ >>> x = np.arange(3)
+ >>> np.array2string(x, formatter={'int':lambda x: hex(x)})
+ '[0x0L 0x1L 0x2L]'
+
+ """
+ legacy = kwarg.pop('legacy', None)
+ if kwarg:
+ msg = "array2string() got unexpected keyword argument '{}'"
+ raise TypeError(msg.format(kwarg.popitem()[0]))
+
+ overrides = _make_options_dict(precision, threshold, edgeitems,
+ max_line_width, suppress_small, None, None,
+ sign, formatter, floatmode, legacy)
+ options = _format_options.copy()
+ options.update(overrides)
+
+ if options['legacy'] == '1.13':
+ if style is np._NoValue:
+ style = repr
+
+ if a.shape == () and not a.dtype.names:
+ return style(a.item())
+ elif style is not np._NoValue:
+ # Deprecation 11-9-2017 v1.14
+ warnings.warn("'style' argument is deprecated and no longer functional"
+ " except in 1.13 'legacy' mode",
+ DeprecationWarning, stacklevel=3)
+
+ if options['legacy'] != '1.13':
+ options['linewidth'] -= len(suffix)
+
+ # treat as a null array if any of shape elements == 0
+ if a.size == 0:
+ return "[]"
+
+ return _array2string(a, options, separator, prefix)
+
+
+def _extendLine(s, line, word, line_width, next_line_prefix, legacy):
+ needs_wrap = len(line) + len(word) > line_width
+ if legacy != '1.13':
+ # don't wrap lines if it won't help
+ if len(line) <= len(next_line_prefix):
+ needs_wrap = False
+
+ if needs_wrap:
+ s += line.rstrip() + "\n"
+ line = next_line_prefix
+ line += word
+ return s, line
+
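+# A minimal sketch of _extendLine's wrapping behavior (private helper;
+# example assumed from the logic above, not a public API):
+#
+#     >>> _extendLine('', 'abcdef', 'ghij', line_width=8,
+#     ...             next_line_prefix=' ', legacy=False)
+#     ('abcdef\n', ' ghij')
+#
+# The word no longer fits in 8 columns, so the current line is flushed
+# into `s` and a fresh line starts with the hanging indent.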
+
+def _formatArray(a, format_function, line_width, next_line_prefix,
+ separator, edge_items, summary_insert, legacy):
+ """formatArray is designed for two modes of operation:
+
+ 1. Full output
+
+ 2. Summarized output
+
+ """
+ def recurser(index, hanging_indent, curr_width):
+ """
+ By using this local function, we don't need to recurse with all the
+ arguments. Since this function is not created recursively, the cost is
+ not significant
+ """
+ axis = len(index)
+ axes_left = a.ndim - axis
+
+ if axes_left == 0:
+ return format_function(a[index])
+
+ # when recursing, add a space to align with the [ added, and reduce the
+ # length of the line by 1
+ next_hanging_indent = hanging_indent + ' '
+ if legacy == '1.13':
+ next_width = curr_width
+ else:
+ next_width = curr_width - len(']')
+
+ a_len = a.shape[axis]
+ show_summary = summary_insert and 2*edge_items < a_len
+ if show_summary:
+ leading_items = edge_items
+ trailing_items = edge_items
+ else:
+ leading_items = 0
+ trailing_items = a_len
+
+ # stringify the array with the hanging indent on the first line too
+ s = ''
+
+ # last axis (rows) - wrap elements if they would not fit on one line
+ if axes_left == 1:
+ # the length up until the beginning of the separator / bracket
+ if legacy == '1.13':
+ elem_width = curr_width - len(separator.rstrip())
+ else:
+ elem_width = curr_width - max(len(separator.rstrip()), len(']'))
+
+ line = hanging_indent
+ for i in range(leading_items):
+ word = recurser(index + (i,), next_hanging_indent, next_width)
+ s, line = _extendLine(
+ s, line, word, elem_width, hanging_indent, legacy)
+ line += separator
+
+ if show_summary:
+ s, line = _extendLine(
+ s, line, summary_insert, elem_width, hanging_indent, legacy)
+ if legacy == '1.13':
+ line += ", "
+ else:
+ line += separator
+
+ for i in range(trailing_items, 1, -1):
+ word = recurser(index + (-i,), next_hanging_indent, next_width)
+ s, line = _extendLine(
+ s, line, word, elem_width, hanging_indent, legacy)
+ line += separator
+
+ if legacy == '1.13':
+ # width of the separator is not considered on 1.13
+ elem_width = curr_width
+ word = recurser(index + (-1,), next_hanging_indent, next_width)
+ s, line = _extendLine(
+ s, line, word, elem_width, hanging_indent, legacy)
+
+ s += line
+
+ # other axes - insert newlines between rows
+ else:
+ s = ''
+ line_sep = separator.rstrip() + '\n'*(axes_left - 1)
+
+ for i in range(leading_items):
+ nested = recurser(index + (i,), next_hanging_indent, next_width)
+ s += hanging_indent + nested + line_sep
+
+ if show_summary:
+ if legacy == '1.13':
+ # trailing space, fixed number of newlines, and fixed separator
+ s += hanging_indent + summary_insert + ", \n"
+ else:
+ s += hanging_indent + summary_insert + line_sep
+
+ for i in range(trailing_items, 1, -1):
+ nested = recurser(index + (-i,), next_hanging_indent,
+ next_width)
+ s += hanging_indent + nested + line_sep
+
+ nested = recurser(index + (-1,), next_hanging_indent, next_width)
+ s += hanging_indent + nested
+
+ # remove the hanging indent, and wrap in []
+ s = '[' + s[len(hanging_indent):] + ']'
+ return s
+
+ try:
+ # invoke the recursive part with an initial index and prefix
+ return recurser(index=(),
+ hanging_indent=next_line_prefix,
+ curr_width=line_width)
+ finally:
+ # recursive closures have a cyclic reference to themselves, which
+ # requires gc to collect (gh-10620). To avoid this problem, for
+ # performance and PyPy friendliness, we break the cycle:
+ recurser = None
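+
+# Rough sketch of _formatArray on a small 2-D array (private helper;
+# argument values assumed from the signature above):
+#
+#     >>> _formatArray(np.arange(4).reshape(2, 2), str, 75, ' ', ' ',
+#     ...              3, '', False)
+#     '[[0 1]\n [2 3]]'
+#
+# One pair of brackets per axis, newlines between rows, and the hanging
+# indent keeps nested rows aligned under the opening bracket.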
+
+def _none_or_positive_arg(x, name):
+ if x is None:
+ return -1
+ if x < 0:
+ raise ValueError("{} must be >= 0".format(name))
+ return x
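+
+# e.g. _none_or_positive_arg(None, 'precision') == -1 and
+# _none_or_positive_arg(5, 'precision') == 5; -1 appears to act as the
+# internal "unset" sentinel passed down to the dragon4 routines.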
+
+class FloatingFormat(object):
+ """ Formatter for subtypes of np.floating """
+ def __init__(self, data, precision, floatmode, suppress_small, sign=False,
+ **kwarg):
+ # for backcompatibility, accept bools
+ if isinstance(sign, bool):
+ sign = '+' if sign else '-'
+
+ self._legacy = kwarg.get('legacy', False)
+ if self._legacy == '1.13':
+ # when not 0d, legacy does not support '-'
+ if data.shape != () and sign == '-':
+ sign = ' '
+
+ self.floatmode = floatmode
+ if floatmode == 'unique':
+ self.precision = None
+ else:
+ self.precision = precision
+
+ self.precision = _none_or_positive_arg(self.precision, 'precision')
+
+ self.suppress_small = suppress_small
+ self.sign = sign
+ self.exp_format = False
+ self.large_exponent = False
+
+ self.fillFormat(data)
+
+ def fillFormat(self, data):
+ # only the finite values are used to compute the number of digits
+ finite_vals = data[isfinite(data)]
+
+ # choose exponential mode based on the non-zero finite values:
+ abs_non_zero = absolute(finite_vals[finite_vals != 0])
+ if len(abs_non_zero) != 0:
+ max_val = np.max(abs_non_zero)
+ min_val = np.min(abs_non_zero)
+ with errstate(over='ignore'): # division can overflow
+ if max_val >= 1.e8 or (not self.suppress_small and
+ (min_val < 0.0001 or max_val/min_val > 1000.)):
+ self.exp_format = True
+
+ # do a first pass of printing all the numbers, to determine sizes
+ if len(finite_vals) == 0:
+ self.pad_left = 0
+ self.pad_right = 0
+ self.trim = '.'
+ self.exp_size = -1
+ self.unique = True
+ elif self.exp_format:
+ trim, unique = '.', True
+ if self.floatmode == 'fixed' or self._legacy == '1.13':
+ trim, unique = 'k', False
+ strs = (dragon4_scientific(x, precision=self.precision,
+ unique=unique, trim=trim, sign=self.sign == '+')
+ for x in finite_vals)
+ frac_strs, _, exp_strs = zip(*(s.partition('e') for s in strs))
+ int_part, frac_part = zip(*(s.split('.') for s in frac_strs))
+ self.exp_size = max(len(s) for s in exp_strs) - 1
+
+ self.trim = 'k'
+ self.precision = max(len(s) for s in frac_part)
+
+ # for back-compat with np 1.13, use 2 spaces & sign and full prec
+ if self._legacy == '1.13':
+ self.pad_left = 3
+ else:
+ # this should be only 1 or 2. Can be calculated from sign.
+ self.pad_left = max(len(s) for s in int_part)
+ # pad_right is only needed for nan length calculation
+ self.pad_right = self.exp_size + 2 + self.precision
+
+ self.unique = False
+ else:
+ # first pass printing to determine sizes
+ trim, unique = '.', True
+ if self.floatmode == 'fixed':
+ trim, unique = 'k', False
+ strs = (dragon4_positional(x, precision=self.precision,
+ fractional=True,
+ unique=unique, trim=trim,
+ sign=self.sign == '+')
+ for x in finite_vals)
+ int_part, frac_part = zip(*(s.split('.') for s in strs))
+ if self._legacy == '1.13':
+ self.pad_left = 1 + max(len(s.lstrip('-+')) for s in int_part)
+ else:
+ self.pad_left = max(len(s) for s in int_part)
+ self.pad_right = max(len(s) for s in frac_part)
+ self.exp_size = -1
+
+ if self.floatmode in ['fixed', 'maxprec_equal']:
+ self.precision = self.pad_right
+ self.unique = False
+ self.trim = 'k'
+ else:
+ self.unique = True
+ self.trim = '.'
+
+ if self._legacy != '1.13':
+ # account for sign = ' ' by adding one to pad_left
+ if self.sign == ' ' and not any(np.signbit(finite_vals)):
+ self.pad_left += 1
+
+ # if there are non-finite values, may need to increase pad_left
+ if data.size != finite_vals.size:
+ neginf = self.sign != '-' or any(data[isinf(data)] < 0)
+ nanlen = len(_format_options['nanstr'])
+ inflen = len(_format_options['infstr']) + neginf
+ offset = self.pad_right + 1 # +1 for decimal pt
+ self.pad_left = max(self.pad_left, nanlen - offset, inflen - offset)
+
+ def __call__(self, x):
+ if not np.isfinite(x):
+ with errstate(invalid='ignore'):
+ if np.isnan(x):
+ sign = '+' if self.sign == '+' else ''
+ ret = sign + _format_options['nanstr']
+ else: # isinf
+ sign = '-' if x < 0 else '+' if self.sign == '+' else ''
+ ret = sign + _format_options['infstr']
+ return ' '*(self.pad_left + self.pad_right + 1 - len(ret)) + ret
+
+ if self.exp_format:
+ return dragon4_scientific(x,
+ precision=self.precision,
+ unique=self.unique,
+ trim=self.trim,
+ sign=self.sign == '+',
+ pad_left=self.pad_left,
+ exp_digits=self.exp_size)
+ else:
+ return dragon4_positional(x,
+ precision=self.precision,
+ unique=self.unique,
+ fractional=True,
+ trim=self.trim,
+ sign=self.sign == '+',
+ pad_left=self.pad_left,
+ pad_right=self.pad_right)
+
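+# A usage sketch (private formatter; output assumed from the logic
+# above): the instance is sized from the whole array so that values
+# align when printed.
+#
+#     >>> fmt = FloatingFormat(np.array([0.1, 12.5]), precision=8,
+#     ...                      floatmode='maxprec', suppress_small=False)
+#     >>> fmt(0.1)
+#     ' 0.1'
+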
+# for back-compatibility, we keep the classes for each float type too
+class FloatFormat(FloatingFormat):
+ def __init__(self, *args, **kwargs):
+ warnings.warn("FloatFormat has been replaced by FloatingFormat",
+ DeprecationWarning, stacklevel=2)
+ super(FloatFormat, self).__init__(*args, **kwargs)
+
+
+class LongFloatFormat(FloatingFormat):
+ def __init__(self, *args, **kwargs):
+ warnings.warn("LongFloatFormat has been replaced by FloatingFormat",
+ DeprecationWarning, stacklevel=2)
+ super(LongFloatFormat, self).__init__(*args, **kwargs)
+
+
+@set_module('numpy')
+def format_float_scientific(x, precision=None, unique=True, trim='k',
+ sign=False, pad_left=None, exp_digits=None):
+ """
+ Format a floating-point scalar as a decimal string in scientific notation.
+
+ Provides control over rounding, trimming and padding. Uses and assumes
+ IEEE unbiased rounding. Uses the "Dragon4" algorithm.
+
+ Parameters
+ ----------
+ x : python float or numpy floating scalar
+ Value to format.
+ precision : non-negative integer or None, optional
+ Maximum number of digits to print. May be None if `unique` is
+ `True`, but must be an integer if unique is `False`.
+ unique : boolean, optional
+ If `True`, use a digit-generation strategy which gives the shortest
+ representation which uniquely identifies the floating-point number from
+ other values of the same type, by judicious rounding. If `precision`
+ was omitted, print all necessary digits, otherwise digit generation is
+ cut off after `precision` digits and the remaining value is rounded.
+ If `False`, digits are generated as if printing an infinite-precision
+ value and stopping after `precision` digits, rounding the remaining
+ value.
+ trim : one of 'k', '.', '0', '-', optional
+ Controls post-processing trimming of trailing digits, as follows:
+
+ * 'k' : keep trailing zeros, keep decimal point (no trimming)
+ * '.' : trim all trailing zeros, leave decimal point
+ * '0' : trim all but the zero before the decimal point. Insert the
+ zero if it is missing.
+ * '-' : trim trailing zeros and any trailing decimal point
+ sign : boolean, optional
+ Whether to show the sign for positive values.
+ pad_left : non-negative integer, optional
+ Pad the left side of the string with whitespace until at least that
+ many characters are to the left of the decimal point.
+ exp_digits : non-negative integer, optional
+ Pad the exponent with zeros until it contains at least this many digits.
+ If omitted, the exponent will be at least 2 digits.
+
+ Returns
+ -------
+ rep : string
+ The string representation of the floating point value
+
+ See Also
+ --------
+ format_float_positional
+
+ Examples
+ --------
+ >>> np.format_float_scientific(np.float32(np.pi))
+ '3.1415927e+00'
+ >>> s = np.float32(1.23e24)
+ >>> np.format_float_scientific(s, unique=False, precision=15)
+ '1.230000071797338e+24'
+ >>> np.format_float_scientific(s, exp_digits=4)
+ '1.23e+0024'
+ """
+ precision = _none_or_positive_arg(precision, 'precision')
+ pad_left = _none_or_positive_arg(pad_left, 'pad_left')
+ exp_digits = _none_or_positive_arg(exp_digits, 'exp_digits')
+ return dragon4_scientific(x, precision=precision, unique=unique,
+ trim=trim, sign=sign, pad_left=pad_left,
+ exp_digits=exp_digits)
+
+
+@set_module('numpy')
+def format_float_positional(x, precision=None, unique=True,
+ fractional=True, trim='k', sign=False,
+ pad_left=None, pad_right=None):
+ """
+ Format a floating-point scalar as a decimal string in positional notation.
+
+ Provides control over rounding, trimming and padding. Uses and assumes
+ IEEE unbiased rounding. Uses the "Dragon4" algorithm.
+
+ Parameters
+ ----------
+ x : python float or numpy floating scalar
+ Value to format.
+ precision : non-negative integer or None, optional
+ Maximum number of digits to print. May be None if `unique` is
+ `True`, but must be an integer if unique is `False`.
+ unique : boolean, optional
+ If `True`, use a digit-generation strategy which gives the shortest
+ representation which uniquely identifies the floating-point number from
+ other values of the same type, by judicious rounding. If `precision`
+ was omitted, print out all necessary digits, otherwise digit generation
+ is cut off after `precision` digits and the remaining value is rounded.
+ If `False`, digits are generated as if printing an infinite-precision
+ value and stopping after `precision` digits, rounding the remaining
+ value.
+ fractional : boolean, optional
+ If `True`, the cutoff of `precision` digits refers to the total number
+ of digits after the decimal point, including leading zeros.
+ If `False`, `precision` refers to the total number of significant
+ digits, before or after the decimal point, ignoring leading zeros.
+ trim : one of 'k', '.', '0', '-', optional
+ Controls post-processing trimming of trailing digits, as follows:
+
+ * 'k' : keep trailing zeros, keep decimal point (no trimming)
+ * '.' : trim all trailing zeros, leave decimal point
+ * '0' : trim all but the zero before the decimal point. Insert the
+ zero if it is missing.
+ * '-' : trim trailing zeros and any trailing decimal point
+ sign : boolean, optional
+ Whether to show the sign for positive values.
+ pad_left : non-negative integer, optional
+ Pad the left side of the string with whitespace until at least that
+ many characters are to the left of the decimal point.
+ pad_right : non-negative integer, optional
+ Pad the right side of the string with whitespace until at least that
+ many characters are to the right of the decimal point.
+
+ Returns
+ -------
+ rep : string
+ The string representation of the floating point value
+
+ See Also
+ --------
+ format_float_scientific
+
+ Examples
+ --------
+ >>> np.format_float_positional(np.float32(np.pi))
+ '3.1415927'
+ >>> np.format_float_positional(np.float16(np.pi))
+ '3.14'
+ >>> np.format_float_positional(np.float16(0.3))
+ '0.3'
+ >>> np.format_float_positional(np.float16(0.3), unique=False, precision=10)
+ '0.3000488281'
+ """
+ precision = _none_or_positive_arg(precision, 'precision')
+ pad_left = _none_or_positive_arg(pad_left, 'pad_left')
+ pad_right = _none_or_positive_arg(pad_right, 'pad_right')
+ return dragon4_positional(x, precision=precision, unique=unique,
+ fractional=fractional, trim=trim,
+ sign=sign, pad_left=pad_left,
+ pad_right=pad_right)
+
+
+class IntegerFormat(object):
+ def __init__(self, data):
+ if data.size > 0:
+ max_str_len = max(len(str(np.max(data))),
+ len(str(np.min(data))))
+ else:
+ max_str_len = 0
+ self.format = '%{}d'.format(max_str_len)
+
+ def __call__(self, x):
+ return self.format % x
+
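+# e.g. IntegerFormat(np.array([1, 100]))(5) == '  5': the field width
+# is taken from the widest value in the array (here len('100') == 3).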
+
+class BoolFormat(object):
+ def __init__(self, data, **kwargs):
+ # add an extra space so " True" and "False" have the same length and
+ # array elements align nicely when printed, except in 0d arrays
+ self.truestr = ' True' if data.shape != () else 'True'
+
+ def __call__(self, x):
+ return self.truestr if x else "False"
+
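+# e.g. BoolFormat(np.array([True, False]))(True) == ' True', so that
+# ' True' and 'False' occupy the same width in non-0d output.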
+
+class ComplexFloatingFormat(object):
+ """ Formatter for subtypes of np.complexfloating """
+ def __init__(self, x, precision, floatmode, suppress_small,
+ sign=False, **kwarg):
+ # for backcompatibility, accept bools
+ if isinstance(sign, bool):
+ sign = '+' if sign else '-'
+
+ floatmode_real = floatmode_imag = floatmode
+ if kwarg.get('legacy', False) == '1.13':
+ floatmode_real = 'maxprec_equal'
+ floatmode_imag = 'maxprec'
+
+ self.real_format = FloatingFormat(x.real, precision, floatmode_real,
+ suppress_small, sign=sign, **kwarg)
+ self.imag_format = FloatingFormat(x.imag, precision, floatmode_imag,
+ suppress_small, sign='+', **kwarg)
+
+ def __call__(self, x):
+ r = self.real_format(x.real)
+ i = self.imag_format(x.imag)
+
+ # add the 'j' before the terminal whitespace in i
+ sp = len(i.rstrip())
+ i = i[:sp] + 'j' + i[sp:]
+
+ return r + i
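+
+# Sketch (output assumed from the code above): the imaginary part always
+# carries an explicit sign, and 'j' is inserted before any trailing
+# padding of the imaginary field:
+#
+#     >>> fmt = ComplexFloatingFormat(np.array([1+2j]), precision=8,
+#     ...                             floatmode='maxprec',
+#     ...                             suppress_small=False)
+#     >>> fmt(1+2j)
+#     '1.+2.j'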
+
+# for back-compatibility, we keep the classes for each complex type too
+class ComplexFormat(ComplexFloatingFormat):
+ def __init__(self, *args, **kwargs):
+ warnings.warn(
+ "ComplexFormat has been replaced by ComplexFloatingFormat",
+ DeprecationWarning, stacklevel=2)
+ super(ComplexFormat, self).__init__(*args, **kwargs)
+
+class LongComplexFormat(ComplexFloatingFormat):
+ def __init__(self, *args, **kwargs):
+ warnings.warn(
+ "LongComplexFormat has been replaced by ComplexFloatingFormat",
+ DeprecationWarning, stacklevel=2)
+ super(LongComplexFormat, self).__init__(*args, **kwargs)
+
+
+class _TimelikeFormat(object):
+ def __init__(self, data):
+ non_nat = data[~isnat(data)]
+ if len(non_nat) > 0:
+ # Max str length of non-NaT elements
+ max_str_len = max(len(self._format_non_nat(np.max(non_nat))),
+ len(self._format_non_nat(np.min(non_nat))))
+ else:
+ max_str_len = 0
+ if len(non_nat) < data.size:
+ # data contains a NaT
+ max_str_len = max(max_str_len, 5)
+ self._format = '%{}s'.format(max_str_len)
+ self._nat = "'NaT'".rjust(max_str_len)
+
+ def _format_non_nat(self, x):
+ # override in subclass
+ raise NotImplementedError
+
+ def __call__(self, x):
+ if isnat(x):
+ return self._nat
+ else:
+ return self._format % self._format_non_nat(x)
+
+
+class DatetimeFormat(_TimelikeFormat):
+ def __init__(self, x, unit=None, timezone=None, casting='same_kind',
+ legacy=False):
+ # Get the unit from the dtype
+ if unit is None:
+ if x.dtype.kind == 'M':
+ unit = datetime_data(x.dtype)[0]
+ else:
+ unit = 's'
+
+ if timezone is None:
+ timezone = 'naive'
+ self.timezone = timezone
+ self.unit = unit
+ self.casting = casting
+ self.legacy = legacy
+
+ # must be called after the above are configured
+ super(DatetimeFormat, self).__init__(x)
+
+ def __call__(self, x):
+ if self.legacy == '1.13':
+ return self._format_non_nat(x)
+ return super(DatetimeFormat, self).__call__(x)
+
+ def _format_non_nat(self, x):
+ return "'%s'" % datetime_as_string(x,
+ unit=self.unit,
+ timezone=self.timezone,
+ casting=self.casting)
+
+
+class TimedeltaFormat(_TimelikeFormat):
+ def _format_non_nat(self, x):
+ return str(x.astype('i8'))
+
+
+class SubArrayFormat(object):
+ def __init__(self, format_function):
+ self.format_function = format_function
+
+ def __call__(self, arr):
+ if arr.ndim <= 1:
+ return "[" + ", ".join(self.format_function(a) for a in arr) + "]"
+ return "[" + ", ".join(self.__call__(a) for a in arr) + "]"
+
+
+class StructuredVoidFormat(object):
+ """
+ Formatter for structured np.void objects.
+
+ This does not work on structured alias types like np.dtype(('i4', 'i2,i2')),
+ as alias scalars lose their field information, and the implementation
+ relies upon np.void.__getitem__.
+ """
+ def __init__(self, format_functions):
+ self.format_functions = format_functions
+
+ @classmethod
+ def from_data(cls, data, **options):
+ """
+ This is a second way to initialize StructuredVoidFormat, using the raw data
+ as input. Added to avoid changing the signature of __init__.
+ """
+ format_functions = []
+ for field_name in data.dtype.names:
+ format_function = _get_format_function(data[field_name], **options)
+ if data.dtype[field_name].shape != ():
+ format_function = SubArrayFormat(format_function)
+ format_functions.append(format_function)
+ return cls(format_functions)
+
+ def __call__(self, x):
+ str_fields = [
+ format_function(field)
+ for field, format_function in zip(x, self.format_functions)
+ ]
+ if len(str_fields) == 1:
+ return "({},)".format(str_fields[0])
+ else:
+ return "({})".format(", ".join(str_fields))
+
+
+# for backwards compatibility
+class StructureFormat(StructuredVoidFormat):
+ def __init__(self, *args, **kwargs):
+ # NumPy 1.14, 2018-02-14
+ warnings.warn(
+ "StructureFormat has been replaced by StructuredVoidFormat",
+ DeprecationWarning, stacklevel=2)
+ super(StructureFormat, self).__init__(*args, **kwargs)
+
+
+def _void_scalar_repr(x):
+ """
+ Implements the repr for structured-void scalars. It is called from the
+ scalartypes.c.src code, and is placed here because it uses the elementwise
+ formatters defined above.
+ """
+ return StructuredVoidFormat.from_data(array(x), **_format_options)(x)
+
+
+_typelessdata = [int_, float_, complex_, bool_]
+if issubclass(intc, int):
+ _typelessdata.append(intc)
+if issubclass(longlong, int):
+ _typelessdata.append(longlong)
+
+
+def dtype_is_implied(dtype):
+ """
+ Determine if the given dtype is implied by the representation of its values.
+
+ Parameters
+ ----------
+ dtype : dtype
+ Data type
+
+ Returns
+ -------
+ implied : bool
+ True if the dtype is implied by the representation of its values.
+
+ Examples
+ --------
+ >>> np.core.arrayprint.dtype_is_implied(int)
+ True
+ >>> np.array([1, 2, 3], int)
+ array([1, 2, 3])
+ >>> np.core.arrayprint.dtype_is_implied(np.int8)
+ False
+ >>> np.array([1, 2, 3], np.int8)
+ array([1, 2, 3], dtype=int8)
+ """
+ dtype = np.dtype(dtype)
+ if _format_options['legacy'] == '1.13' and dtype.type == bool_:
+ return False
+
+ # not just void types can be structured, and names are not part of the repr
+ if dtype.names is not None:
+ return False
+
+ return dtype.type in _typelessdata
+
+
+def dtype_short_repr(dtype):
+ """
+ Convert a dtype to a short form which evaluates to the same dtype.
+
+ The intent is roughly that the following holds
+
+ >>> from numpy import *
+ >>> assert eval(dtype_short_repr(dt)) == dt
+ """
+ if dtype.names is not None:
+ # structured dtypes give a list or tuple repr
+ return str(dtype)
+ elif issubclass(dtype.type, flexible):
+ # handle these separately so they don't give garbage like str256
+ return "'%s'" % str(dtype)
+
+ typename = dtype.name
+ # quote typenames which can't be represented as python variable names
+ if typename and not (typename[0].isalpha() and typename.isalnum()):
+ typename = repr(typename)
+
+ return typename
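+
+# e.g. dtype_short_repr(np.dtype(np.int8)) == 'int8', while a flexible
+# type such as np.dtype('S5') comes back quoted ("'|S5'") so that
+# eval() can round-trip it.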
+
+
+def _array_repr_implementation(
+ arr, max_line_width=None, precision=None, suppress_small=None,
+ array2string=array2string):
+ """Internal version of array_repr() that allows overriding array2string."""
+ if max_line_width is None:
+ max_line_width = _format_options['linewidth']
+
+ if type(arr) is not ndarray:
+ class_name = type(arr).__name__
+ else:
+ class_name = "array"
+
+ skipdtype = dtype_is_implied(arr.dtype) and arr.size > 0
+
+ prefix = class_name + "("
+ suffix = ")" if skipdtype else ","
+
+ if (_format_options['legacy'] == '1.13' and
+ arr.shape == () and not arr.dtype.names):
+ lst = repr(arr.item())
+ elif arr.size > 0 or arr.shape == (0,):
+ lst = array2string(arr, max_line_width, precision, suppress_small,
+ ', ', prefix, suffix=suffix)
+ else: # show zero-length shape unless it is (0,)
+ lst = "[], shape=%s" % (repr(arr.shape),)
+
+ arr_str = prefix + lst + suffix
+
+ if skipdtype:
+ return arr_str
+
+ dtype_str = "dtype={})".format(dtype_short_repr(arr.dtype))
+
+ # compute whether we should put dtype on a new line: Do so if adding the
+ # dtype would extend the last line past max_line_width.
+ # Note: This line gives the correct result even when rfind returns -1.
+ last_line_len = len(arr_str) - (arr_str.rfind('\n') + 1)
+ spacer = " "
+ if _format_options['legacy'] == '1.13':
+ if issubclass(arr.dtype.type, flexible):
+ spacer = '\n' + ' '*len(class_name + "(")
+ elif last_line_len + len(dtype_str) + 1 > max_line_width:
+ spacer = '\n' + ' '*len(class_name + "(")
+
+ return arr_str + spacer + dtype_str
+
+
+def _array_repr_dispatcher(
+ arr, max_line_width=None, precision=None, suppress_small=None):
+ return (arr,)
+
+
+@array_function_dispatch(_array_repr_dispatcher, module='numpy')
+def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
+ """
+ Return the string representation of an array.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Input array.
+ max_line_width : int, optional
+ The maximum number of columns the string should span. Newline
+ characters split the string appropriately after array elements.
+ precision : int, optional
+ Floating point precision. Default is the current printing precision
+ (usually 8), which can be altered using `set_printoptions`.
+ suppress_small : bool, optional
+ Represent very small numbers as zero, default is False. Very small
+ is defined by `precision`: if the precision is 8, then numbers
+ smaller than 5e-9 are represented as zero.
+
+ Returns
+ -------
+ string : str
+ The string representation of an array.
+
+ See Also
+ --------
+ array_str, array2string, set_printoptions
+
+ Examples
+ --------
+ >>> np.array_repr(np.array([1,2]))
+ 'array([1, 2])'
+ >>> np.array_repr(np.ma.array([0.]))
+ 'MaskedArray([ 0.])'
+ >>> np.array_repr(np.array([], np.int32))
+ 'array([], dtype=int32)'
+
+ >>> x = np.array([1e-6, 4e-7, 2, 3])
+ >>> np.array_repr(x, precision=6, suppress_small=True)
+ 'array([ 0.000001, 0. , 2. , 3. ])'
+
+ """
+ return _array_repr_implementation(
+ arr, max_line_width, precision, suppress_small)
+
+
+_guarded_str = _recursive_guard()(str)
+
+
+def _array_str_implementation(
+ a, max_line_width=None, precision=None, suppress_small=None,
+ array2string=array2string):
+ """Internal version of array_str() that allows overriding array2string."""
+ if (_format_options['legacy'] == '1.13' and
+ a.shape == () and not a.dtype.names):
+ return str(a.item())
+
+ # the str of 0d arrays is a special case: It should appear like a scalar,
+ # so floats are not truncated by `precision`, and strings are not wrapped
+ # in quotes. So we return the str of the scalar value.
+ if a.shape == ():
+ # obtain a scalar and call str on it, avoiding problems for subclasses
+ # for which indexing with () returns a 0d instead of a scalar by using
+ # ndarray's getindex. Also guard against recursive 0d object arrays.
+ return _guarded_str(np.ndarray.__getitem__(a, ()))
+
+ return array2string(a, max_line_width, precision, suppress_small, ' ', "")
+
+
+def _array_str_dispatcher(
+ a, max_line_width=None, precision=None, suppress_small=None):
+ return (a,)
+
+
+@array_function_dispatch(_array_str_dispatcher, module='numpy')
+def array_str(a, max_line_width=None, precision=None, suppress_small=None):
+ """
+ Return a string representation of the data in an array.
+
+ The data in the array is returned as a single string. This function is
+ similar to `array_repr`, the difference being that `array_repr` also
+ returns information on the kind of array and its data type.
+
+ Parameters
+ ----------
+ a : ndarray
+ Input array.
+ max_line_width : int, optional
+ Inserts newlines if text is longer than `max_line_width`. The
+ default is, indirectly, 75.
+ precision : int, optional
+ Floating point precision. Default is the current printing precision
+ (usually 8), which can be altered using `set_printoptions`.
+ suppress_small : bool, optional
+ Represent numbers "very close" to zero as zero; default is False.
+ Very close is defined by precision: if the precision is 8, e.g.,
+ numbers smaller (in absolute value) than 5e-9 are represented as
+ zero.
+
+ See Also
+ --------
+ array2string, array_repr, set_printoptions
+
+ Examples
+ --------
+ >>> np.array_str(np.arange(3))
+ '[0 1 2]'
+
+ """
+ return _array_str_implementation(
+ a, max_line_width, precision, suppress_small)
+
+
+# needed if __array_function__ is disabled
+_array2string_impl = getattr(array2string, '__wrapped__', array2string)
+_default_array_str = functools.partial(_array_str_implementation,
+ array2string=_array2string_impl)
+_default_array_repr = functools.partial(_array_repr_implementation,
+ array2string=_array2string_impl)
+
+
+def set_string_function(f, repr=True):
+ """
+ Set a Python function to be used when pretty printing arrays.
+
+ Parameters
+ ----------
+ f : function or None
+ Function to be used to pretty print arrays. The function should expect
+ a single array argument and return a string of the representation of
+ the array. If None, the function is reset to the default NumPy function
+ to print arrays.
+ repr : bool, optional
+ If True (default), the function for pretty printing (``__repr__``)
+ is set, if False the function that returns the default string
+ representation (``__str__``) is set.
+
+ See Also
+ --------
+ set_printoptions, get_printoptions
+
+ Examples
+ --------
+ >>> def pprint(arr):
+ ... return 'HA! - What are you going to do now?'
+ ...
+ >>> np.set_string_function(pprint)
+ >>> a = np.arange(10)
+ >>> a
+ HA! - What are you going to do now?
+ >>> print(a)
+ [0 1 2 3 4 5 6 7 8 9]
+
+ We can reset the function to the default:
+
+ >>> np.set_string_function(None)
+ >>> a
+ array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+
+ `repr` affects either pretty printing or normal string representation.
+ Note that ``__repr__`` is still affected by setting ``__str__``
+ because the width of each array element in the returned string becomes
+ equal to the length of the result of ``__str__()``.
+
+ >>> x = np.arange(4)
+ >>> np.set_string_function(lambda x:'random', repr=False)
+ >>> x.__str__()
+ 'random'
+ >>> x.__repr__()
+ 'array([ 0, 1, 2, 3])'
+
+ """
+ if f is None:
+ if repr:
+ return multiarray.set_string_function(_default_array_repr, 1)
+ else:
+ return multiarray.set_string_function(_default_array_str, 0)
+ else:
+ return multiarray.set_string_function(f, repr)
+
+set_string_function(_default_array_str, 0)
+set_string_function(_default_array_repr, 1)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/arrayprint.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/arrayprint.pyc
new file mode 100644
index 0000000..9f94b0d
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/arrayprint.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/cversions.py b/project/venv/lib/python2.7/site-packages/numpy/core/cversions.py
new file mode 100644
index 0000000..7995dd9
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/cversions.py
@@ -0,0 +1,15 @@
+"""Simple script to compute the api hash of the current API.
+
+The API hash is defined by numpy_api_order and ufunc_api_order.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+from os.path import dirname
+
+from code_generators.genapi import fullapi_hash
+from code_generators.numpy_api import full_api
+
+if __name__ == '__main__':
+ curdir = dirname(__file__)
+ print(fullapi_hash(full_api))
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/cversions.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/cversions.pyc
new file mode 100644
index 0000000..f4ef3cb
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/cversions.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/defchararray.py b/project/venv/lib/python2.7/site-packages/numpy/core/defchararray.py
new file mode 100644
index 0000000..12ba3f0
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/defchararray.py
@@ -0,0 +1,2835 @@
+"""
+This module contains a set of functions for vectorized string
+operations and methods.
+
+.. note::
+ The `chararray` class exists for backwards compatibility with
+ Numarray; it is not recommended for new development. Starting from numpy
+ 1.4, if one needs arrays of strings, it is recommended to use arrays of
+ `dtype` `object_`, `string_` or `unicode_`, and use the free functions
+ in the `numpy.char` module for fast vectorized string operations.
+
+Some methods will only be available if the corresponding string method is
+available in your version of Python.
+
+The preferred alias for `defchararray` is `numpy.char`.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import functools
+import sys
+from .numerictypes import string_, unicode_, integer, object_, bool_, character
+from .numeric import ndarray, compare_chararrays
+from .numeric import array as narray
+from numpy.core.multiarray import _vec_string
+from numpy.core.overrides import set_module
+from numpy.core import overrides
+from numpy.compat import asbytes, long
+import numpy
+
+__all__ = [
+ 'chararray', 'equal', 'not_equal', 'greater_equal', 'less_equal',
+ 'greater', 'less', 'str_len', 'add', 'multiply', 'mod', 'capitalize',
+ 'center', 'count', 'decode', 'encode', 'endswith', 'expandtabs',
+ 'find', 'index', 'isalnum', 'isalpha', 'isdigit', 'islower', 'isspace',
+ 'istitle', 'isupper', 'join', 'ljust', 'lower', 'lstrip', 'partition',
+ 'replace', 'rfind', 'rindex', 'rjust', 'rpartition', 'rsplit',
+ 'rstrip', 'split', 'splitlines', 'startswith', 'strip', 'swapcase',
+ 'title', 'translate', 'upper', 'zfill', 'isnumeric', 'isdecimal',
+ 'array', 'asarray'
+ ]
+
+
+_globalvar = 0
+if sys.version_info[0] >= 3:
+ _unicode = str
+ _bytes = bytes
+else:
+ _unicode = unicode
+ _bytes = str
+_len = len
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy.char')
+
+
+def _use_unicode(*args):
+ """
+ Helper function for determining the output type of some string
+ operations.
+
+ For an operation on two ndarrays, if at least one is unicode, the
+ result should be unicode.
+ """
+ for x in args:
+ if (isinstance(x, _unicode) or
+ issubclass(numpy.asarray(x).dtype.type, unicode_)):
+ return unicode_
+ return string_
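+
+# e.g. _use_unicode('a', u'b') returns unicode_ (one unicode input is
+# enough), while _use_unicode('a', 'b') stays string_.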
+
+def _to_string_or_unicode_array(result):
+ """
+ Helper function to cast a result back into a string or unicode array
+ if an object array must be used as an intermediary.
+ """
+ return numpy.asarray(result.tolist())
+
+def _clean_args(*args):
+ """
+ Helper function for delegating arguments to Python string
+ functions.
+
+ Many of the Python string operations that have optional arguments
+ do not use 'None' to indicate a default value. In these cases,
+ we need to remove all `None` arguments, and those following them.
+ """
+ newargs = []
+ for chk in args:
+ if chk is None:
+ break
+ newargs.append(chk)
+ return newargs
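+
+# e.g. _clean_args(3, None, 10) == [3]: everything from the first None
+# onward is dropped, mirroring how str methods treat omitted arguments.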
+
+def _get_num_chars(a):
+ """
+ Helper function that returns the number of characters per field in
+ a string or unicode array. This is to abstract out the fact that
+ for a unicode array this is itemsize / 4.
+ """
+ if issubclass(a.dtype.type, unicode_):
+ return a.itemsize // 4
+ return a.itemsize
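+
+# e.g. a '<U6' array stores 6 UCS-4 code points per element in an
+# itemsize of 24 bytes, so _get_num_chars returns 24 // 4 == 6.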
+
+
+def _binary_op_dispatcher(x1, x2):
+ return (x1, x2)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def equal(x1, x2):
+ """
+ Return (x1 == x2) element-wise.
+
+ Unlike `numpy.equal`, this comparison is performed by first
+ stripping whitespace characters from the end of the string. This
+ behavior is provided for backward-compatibility with numarray.
+
+ Parameters
+ ----------
+ x1, x2 : array_like of str or unicode
+ Input arrays of the same shape.
+
+ Returns
+ -------
+ out : ndarray or bool
+ Output array of bools, or a single bool if x1 and x2 are scalars.
+
+ See Also
+ --------
+ not_equal, greater_equal, less_equal, greater, less
+ """
+ return compare_chararrays(x1, x2, '==', True)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def not_equal(x1, x2):
+ """
+ Return (x1 != x2) element-wise.
+
+ Unlike `numpy.not_equal`, this comparison is performed by first
+ stripping whitespace characters from the end of the string. This
+ behavior is provided for backward-compatibility with numarray.
+
+ Parameters
+ ----------
+ x1, x2 : array_like of str or unicode
+ Input arrays of the same shape.
+
+ Returns
+ -------
+ out : ndarray or bool
+ Output array of bools, or a single bool if x1 and x2 are scalars.
+
+ See Also
+ --------
+ equal, greater_equal, less_equal, greater, less
+ """
+ return compare_chararrays(x1, x2, '!=', True)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def greater_equal(x1, x2):
+ """
+ Return (x1 >= x2) element-wise.
+
+ Unlike `numpy.greater_equal`, this comparison is performed by
+ first stripping whitespace characters from the end of the string.
+ This behavior is provided for backward-compatibility with
+ numarray.
+
+ Parameters
+ ----------
+ x1, x2 : array_like of str or unicode
+ Input arrays of the same shape.
+
+ Returns
+ -------
+ out : ndarray or bool
+ Output array of bools, or a single bool if x1 and x2 are scalars.
+
+ See Also
+ --------
+ equal, not_equal, less_equal, greater, less
+ """
+ return compare_chararrays(x1, x2, '>=', True)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def less_equal(x1, x2):
+ """
+ Return (x1 <= x2) element-wise.
+
+ Unlike `numpy.less_equal`, this comparison is performed by first
+ stripping whitespace characters from the end of the string. This
+ behavior is provided for backward-compatibility with numarray.
+
+ Parameters
+ ----------
+ x1, x2 : array_like of str or unicode
+ Input arrays of the same shape.
+
+ Returns
+ -------
+ out : ndarray or bool
+ Output array of bools, or a single bool if x1 and x2 are scalars.
+
+ See Also
+ --------
+ equal, not_equal, greater_equal, greater, less
+ """
+ return compare_chararrays(x1, x2, '<=', True)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def greater(x1, x2):
+ """
+ Return (x1 > x2) element-wise.
+
+ Unlike `numpy.greater`, this comparison is performed by first
+ stripping whitespace characters from the end of the string. This
+ behavior is provided for backward-compatibility with numarray.
+
+ Parameters
+ ----------
+ x1, x2 : array_like of str or unicode
+ Input arrays of the same shape.
+
+ Returns
+ -------
+ out : ndarray or bool
+ Output array of bools, or a single bool if x1 and x2 are scalars.
+
+ See Also
+ --------
+ equal, not_equal, greater_equal, less_equal, less
+ """
+ return compare_chararrays(x1, x2, '>', True)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def less(x1, x2):
+ """
+ Return (x1 < x2) element-wise.
+
+ Unlike `numpy.less`, this comparison is performed by first
+ stripping whitespace characters from the end of the string. This
+ behavior is provided for backward-compatibility with numarray.
+
+ Parameters
+ ----------
+ x1, x2 : array_like of str or unicode
+ Input arrays of the same shape.
+
+ Returns
+ -------
+ out : ndarray or bool
+ Output array of bools, or a single bool if x1 and x2 are scalars.
+
+ See Also
+ --------
+ equal, not_equal, greater_equal, less_equal, greater
+ """
+ return compare_chararrays(x1, x2, '<', True)
+
+
+def _unary_op_dispatcher(a):
+ return (a,)
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def str_len(a):
+ """
+ Return len(a) element-wise.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ Returns
+ -------
+ out : ndarray
+ Output array of integers
+
+ See also
+ --------
+ __builtin__.len
+ """
+ return _vec_string(a, integer, '__len__')
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def add(x1, x2):
+ """
+ Return element-wise string concatenation for two arrays of str or unicode.
+
+ Arrays `x1` and `x2` must have the same shape.
+
+ Parameters
+ ----------
+ x1 : array_like of str or unicode
+ Input array.
+ x2 : array_like of str or unicode
+ Input array.
+
+ Returns
+ -------
+ add : ndarray
+ Output array of `string_` or `unicode_`, depending on input types
+ of the same shape as `x1` and `x2`.
+
+ """
+ arr1 = numpy.asarray(x1)
+ arr2 = numpy.asarray(x2)
+ out_size = _get_num_chars(arr1) + _get_num_chars(arr2)
+ dtype = _use_unicode(arr1, arr2)
+ return _vec_string(arr1, (dtype, out_size), '__add__', (arr2,))
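+
+# A quick sketch (dtype shown for the Python 2 build this file targets):
+#
+#     >>> np.char.add(['hello'], [' world'])
+#     array(['hello world'], dtype='|S11')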
+
+
+def _multiply_dispatcher(a, i):
+ return (a,)
+
+
+@array_function_dispatch(_multiply_dispatcher)
+def multiply(a, i):
+ """
+ Return (a * i), that is, each string repeated `i` times,
+ element-wise.
+
+ Values in `i` of less than 0 are treated as 0 (which yields an
+ empty string).
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ i : array_like of ints
+
+ Returns
+ -------
+ out : ndarray
+ Output array of str or unicode, depending on input types
+
+ """
+ a_arr = numpy.asarray(a)
+ i_arr = numpy.asarray(i)
+ if not issubclass(i_arr.dtype.type, integer):
+ raise ValueError("Can only multiply by integers")
+ out_size = _get_num_chars(a_arr) * max(long(i_arr.max()), 0)
+ return _vec_string(
+ a_arr, (a_arr.dtype.type, out_size), '__mul__', (i_arr,))
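+
+# e.g. np.char.multiply(np.array(['ab']), 3) returns
+# array(['ababab'], dtype='|S6'); negative counts yield ''.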
+
+
+def _mod_dispatcher(a, values):
+ return (a, values)
+
+
+@array_function_dispatch(_mod_dispatcher)
+def mod(a, values):
+ """
+ Return (a % values), that is, pre-Python 2.6 string formatting
+ (interpolation), element-wise for a pair of array_likes of str
+ or unicode.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ values : array_like of values
+ These values will be element-wise interpolated into the string.
+
+ Returns
+ -------
+ out : ndarray
+ Output array of str or unicode, depending on input types
+
+ See also
+ --------
+ str.__mod__
+
+ """
+ return _to_string_or_unicode_array(
+ _vec_string(a, object_, '__mod__', (values,)))
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def capitalize(a):
+ """
+ Return a copy of `a` with only the first character of each element
+ capitalized.
+
+ Calls `str.capitalize` element-wise.
+
+ For 8-bit strings, this method is locale-dependent.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+ Input array of strings to capitalize.
+
+ Returns
+ -------
+ out : ndarray
+ Output array of str or unicode, depending on input
+ types
+
+ See also
+ --------
+ str.capitalize
+
+ Examples
+ --------
+ >>> c = np.array(['a1b2','1b2a','b2a1','2a1b'],'S4'); c
+ array(['a1b2', '1b2a', 'b2a1', '2a1b'],
+ dtype='|S4')
+ >>> np.char.capitalize(c)
+ array(['A1b2', '1b2a', 'B2a1', '2a1b'],
+ dtype='|S4')
+
+ """
+ a_arr = numpy.asarray(a)
+ return _vec_string(a_arr, a_arr.dtype, 'capitalize')
+
+
+def _center_dispatcher(a, width, fillchar=None):
+ return (a,)
+
+
+@array_function_dispatch(_center_dispatcher)
+def center(a, width, fillchar=' '):
+ """
+ Return a copy of `a` with its elements centered in a string of
+ length `width`.
+
+ Calls `str.center` element-wise.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ width : int
+ The length of the resulting strings
+ fillchar : str or unicode, optional
+ The padding character to use (default is space).
+
+ Returns
+ -------
+ out : ndarray
+ Output array of str or unicode, depending on input
+ types
+
+ See also
+ --------
+ str.center
+
+ """
+ a_arr = numpy.asarray(a)
+ width_arr = numpy.asarray(width)
+ size = long(numpy.max(width_arr.flat))
+ if numpy.issubdtype(a_arr.dtype, numpy.string_):
+ fillchar = asbytes(fillchar)
+ return _vec_string(
+ a_arr, (a_arr.dtype.type, size), 'center', (width_arr, fillchar))
+
+
+def _count_dispatcher(a, sub, start=None, end=None):
+ return (a,)
+
+
+@array_function_dispatch(_count_dispatcher)
+def count(a, sub, start=0, end=None):
+ """
+ Returns an array with the number of non-overlapping occurrences of
+ substring `sub` in the range [`start`, `end`].
+
+ Calls `str.count` element-wise.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ sub : str or unicode
+ The substring to search for.
+
+ start, end : int, optional
+ Optional arguments `start` and `end` are interpreted as slice
+ notation to specify the range in which to count.
+
+ Returns
+ -------
+ out : ndarray
+ Output array of ints.
+
+ See also
+ --------
+ str.count
+
+ Examples
+ --------
+ >>> c = np.array(['aAaAaA', ' aA ', 'abBABba'])
+ >>> c
+ array(['aAaAaA', ' aA ', 'abBABba'],
+ dtype='|S7')
+ >>> np.char.count(c, 'A')
+ array([3, 1, 1])
+ >>> np.char.count(c, 'aA')
+ array([3, 1, 0])
+ >>> np.char.count(c, 'A', start=1, end=4)
+ array([2, 1, 1])
+ >>> np.char.count(c, 'A', start=1, end=3)
+ array([1, 0, 0])
+
+ """
+ return _vec_string(a, integer, 'count', [sub, start] + _clean_args(end))
+
+
+def _code_dispatcher(a, encoding=None, errors=None):
+ return (a,)
+
+
+@array_function_dispatch(_code_dispatcher)
+def decode(a, encoding=None, errors=None):
+ """
+ Calls `str.decode` element-wise.
+
+ The set of available codecs comes from the Python standard library,
+ and may be extended at runtime. For more information, see the
+ :mod:`codecs` module.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ encoding : str, optional
+ The name of an encoding
+
+ errors : str, optional
+ Specifies how to handle encoding errors
+
+ Returns
+ -------
+ out : ndarray
+
+ See also
+ --------
+ str.decode
+
+ Notes
+ -----
+ The type of the result will depend on the encoding specified.
+
+ Examples
+ --------
+ >>> c = np.array(['aAaAaA', ' aA ', 'abBABba'])
+ >>> c
+ array(['aAaAaA', ' aA ', 'abBABba'],
+ dtype='|S7')
+ >>> np.char.encode(c, encoding='cp037')
+ array(['\\x81\\xc1\\x81\\xc1\\x81\\xc1', '@@\\x81\\xc1@@',
+ '\\x81\\x82\\xc2\\xc1\\xc2\\x82\\x81'],
+ dtype='|S7')
+
+ """
+ return _to_string_or_unicode_array(
+ _vec_string(a, object_, 'decode', _clean_args(encoding, errors)))
+
+
+@array_function_dispatch(_code_dispatcher)
+def encode(a, encoding=None, errors=None):
+ """
+ Calls `str.encode` element-wise.
+
+ The set of available codecs comes from the Python standard library,
+ and may be extended at runtime. For more information, see the codecs
+ module.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ encoding : str, optional
+ The name of an encoding
+
+ errors : str, optional
+ Specifies how to handle encoding errors
+
+ Returns
+ -------
+ out : ndarray
+
+ See also
+ --------
+ str.encode
+
+ Notes
+ -----
+ The type of the result will depend on the encoding specified.
+
+ """
+ return _to_string_or_unicode_array(
+ _vec_string(a, object_, 'encode', _clean_args(encoding, errors)))
+
+
+def _endswith_dispatcher(a, suffix, start=None, end=None):
+ return (a,)
+
+
+@array_function_dispatch(_endswith_dispatcher)
+def endswith(a, suffix, start=0, end=None):
+ """
+ Returns a boolean array which is `True` where the string element
+ in `a` ends with `suffix`, otherwise `False`.
+
+ Calls `str.endswith` element-wise.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ suffix : str
+
+ start, end : int, optional
+ With optional `start`, test beginning at that position. With
+ optional `end`, stop comparing at that position.
+
+ Returns
+ -------
+ out : ndarray
+ Outputs an array of bools.
+
+ See also
+ --------
+ str.endswith
+
+ Examples
+ --------
+ >>> s = np.array(['foo', 'bar'])
+ >>> s[0] = 'foo'
+ >>> s[1] = 'bar'
+ >>> s
+ array(['foo', 'bar'],
+ dtype='|S3')
+ >>> np.char.endswith(s, 'ar')
+ array([False, True])
+ >>> np.char.endswith(s, 'a', start=1, end=2)
+ array([False, True])
+
+ """
+ return _vec_string(
+ a, bool_, 'endswith', [suffix, start] + _clean_args(end))
+
+
+def _expandtabs_dispatcher(a, tabsize=None):
+ return (a,)
+
+
+@array_function_dispatch(_expandtabs_dispatcher)
+def expandtabs(a, tabsize=8):
+ """
+ Return a copy of each string element where all tab characters are
+ replaced by one or more spaces.
+
+ Calls `str.expandtabs` element-wise.
+
+ Return a copy of each string element where all tab characters are
+ replaced by one or more spaces, depending on the current column
+ and the given `tabsize`. The column number is reset to zero after
+ each newline occurring in the string. This doesn't understand other
+ non-printing characters or escape sequences.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+ Input array
+ tabsize : int, optional
+ Replace tabs with `tabsize` number of spaces. If not given defaults
+ to 8 spaces.
+
+ Returns
+ -------
+ out : ndarray
+ Output array of str or unicode, depending on input type
+
+ See also
+ --------
+ str.expandtabs
+
+ """
+ return _to_string_or_unicode_array(
+ _vec_string(a, object_, 'expandtabs', (tabsize,)))
+
+
+@array_function_dispatch(_count_dispatcher)
+def find(a, sub, start=0, end=None):
+ """
+ For each element, return the lowest index in the string where
+ substring `sub` is found.
+
+ Calls `str.find` element-wise.
+
+ For each element, return the lowest index in the string where
+ substring `sub` is found, such that `sub` is contained in the
+ range [`start`, `end`].
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ sub : str or unicode
+
+ start, end : int, optional
+ Optional arguments `start` and `end` are interpreted as in
+ slice notation.
+
+ Returns
+ -------
+ out : ndarray or int
+ Output array of ints. Returns -1 if `sub` is not found.
+
+ See also
+ --------
+ str.find
+
+ """
+ return _vec_string(
+ a, integer, 'find', [sub, start] + _clean_args(end))
+
+
+@array_function_dispatch(_count_dispatcher)
+def index(a, sub, start=0, end=None):
+ """
+ Like `find`, but raises `ValueError` when the substring is not found.
+
+ Calls `str.index` element-wise.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ sub : str or unicode
+
+ start, end : int, optional
+
+ Returns
+ -------
+ out : ndarray
+ Output array of ints.
+
+ See also
+ --------
+ find, str.find
+
+ """
+ return _vec_string(
+ a, integer, 'index', [sub, start] + _clean_args(end))
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def isalnum(a):
+ """
+ Returns true for each element if all characters in the string are
+ alphanumeric and there is at least one character, false otherwise.
+
+ Calls `str.isalnum` element-wise.
+
+ For 8-bit strings, this method is locale-dependent.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ Returns
+ -------
+ out : ndarray
+ Output array of bools
+
+ See also
+ --------
+ str.isalnum
+ """
+ return _vec_string(a, bool_, 'isalnum')
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def isalpha(a):
+ """
+ Returns true for each element if all characters in the string are
+ alphabetic and there is at least one character, false otherwise.
+
+ Calls `str.isalpha` element-wise.
+
+ For 8-bit strings, this method is locale-dependent.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ Returns
+ -------
+ out : ndarray
+ Output array of bools
+
+ See also
+ --------
+ str.isalpha
+ """
+ return _vec_string(a, bool_, 'isalpha')
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def isdigit(a):
+ """
+ Returns true for each element if all characters in the string are
+ digits and there is at least one character, false otherwise.
+
+ Calls `str.isdigit` element-wise.
+
+ For 8-bit strings, this method is locale-dependent.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ Returns
+ -------
+ out : ndarray
+ Output array of bools
+
+ See also
+ --------
+ str.isdigit
+ """
+ return _vec_string(a, bool_, 'isdigit')
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def islower(a):
+ """
+ Returns true for each element if all cased characters in the
+ string are lowercase and there is at least one cased character,
+ false otherwise.
+
+ Calls `str.islower` element-wise.
+
+ For 8-bit strings, this method is locale-dependent.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ Returns
+ -------
+ out : ndarray
+ Output array of bools
+
+ See also
+ --------
+ str.islower
+ """
+ return _vec_string(a, bool_, 'islower')
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def isspace(a):
+ """
+ Returns true for each element if there are only whitespace
+ characters in the string and there is at least one character,
+ false otherwise.
+
+ Calls `str.isspace` element-wise.
+
+ For 8-bit strings, this method is locale-dependent.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ Returns
+ -------
+ out : ndarray
+ Output array of bools
+
+ See also
+ --------
+ str.isspace
+ """
+ return _vec_string(a, bool_, 'isspace')
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def istitle(a):
+ """
+ Returns true for each element if the element is a titlecased
+ string and there is at least one character, false otherwise.
+
+ Call `str.istitle` element-wise.
+
+ For 8-bit strings, this method is locale-dependent.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ Returns
+ -------
+ out : ndarray
+ Output array of bools
+
+ See also
+ --------
+ str.istitle
+ """
+ return _vec_string(a, bool_, 'istitle')
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def isupper(a):
+ """
+ Returns true for each element if all cased characters in the
+ string are uppercase and there is at least one character, false
+ otherwise.
+
+ Call `str.isupper` element-wise.
+
+ For 8-bit strings, this method is locale-dependent.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ Returns
+ -------
+ out : ndarray
+ Output array of bools
+
+ See also
+ --------
+ str.isupper
+ """
+ return _vec_string(a, bool_, 'isupper')
+
+
+def _join_dispatcher(sep, seq):
+ return (sep, seq)
+
+
+@array_function_dispatch(_join_dispatcher)
+def join(sep, seq):
+ """
+ Return a string which is the concatenation of the strings in the
+ sequence `seq`.
+
+ Calls `str.join` element-wise.
+
+ Parameters
+ ----------
+ sep : array_like of str or unicode
+ seq : array_like of str or unicode
+
+ Returns
+ -------
+ out : ndarray
+ Output array of str or unicode, depending on input types
+
+ See also
+ --------
+ str.join
+ """
+ return _to_string_or_unicode_array(
+ _vec_string(sep, object_, 'join', (seq,)))
+
+
+def _just_dispatcher(a, width, fillchar=None):
+ return (a,)
+
+
+@array_function_dispatch(_just_dispatcher)
+def ljust(a, width, fillchar=' '):
+ """
+ Return an array with the elements of `a` left-justified in a
+ string of length `width`.
+
+ Calls `str.ljust` element-wise.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ width : int
+ The length of the resulting strings
+ fillchar : str or unicode, optional
+ The character to use for padding
+
+ Returns
+ -------
+ out : ndarray
+ Output array of str or unicode, depending on input type
+
+ See also
+ --------
+ str.ljust
+
+ """
+ a_arr = numpy.asarray(a)
+ width_arr = numpy.asarray(width)
+ size = long(numpy.max(width_arr.flat))
+ if numpy.issubdtype(a_arr.dtype, numpy.string_):
+ fillchar = asbytes(fillchar)
+ return _vec_string(
+ a_arr, (a_arr.dtype.type, size), 'ljust', (width_arr, fillchar))
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def lower(a):
+ """
+ Return an array with the elements converted to lowercase.
+
+ Call `str.lower` element-wise.
+
+ For 8-bit strings, this method is locale-dependent.
+
+ Parameters
+ ----------
+ a : array_like, {str, unicode}
+ Input array.
+
+ Returns
+ -------
+ out : ndarray, {str, unicode}
+ Output array of str or unicode, depending on input type
+
+ See also
+ --------
+ str.lower
+
+ Examples
+ --------
+ >>> c = np.array(['A1B C', '1BCA', 'BCA1']); c
+ array(['A1B C', '1BCA', 'BCA1'],
+ dtype='|S5')
+ >>> np.char.lower(c)
+ array(['a1b c', '1bca', 'bca1'],
+ dtype='|S5')
+
+ """
+ a_arr = numpy.asarray(a)
+ return _vec_string(a_arr, a_arr.dtype, 'lower')
+
+
+def _strip_dispatcher(a, chars=None):
+ return (a,)
+
+
+@array_function_dispatch(_strip_dispatcher)
+def lstrip(a, chars=None):
+ """
+ For each element in `a`, return a copy with the leading characters
+ removed.
+
+ Calls `str.lstrip` element-wise.
+
+ Parameters
+ ----------
+ a : array-like, {str, unicode}
+ Input array.
+
+ chars : {str, unicode}, optional
+ The `chars` argument is a string specifying the set of
+ characters to be removed. If omitted or None, the `chars`
+ argument defaults to removing whitespace. The `chars` argument
+ is not a prefix; rather, all combinations of its values are
+ stripped.
+
+ Returns
+ -------
+ out : ndarray, {str, unicode}
+ Output array of str or unicode, depending on input type
+
+ See also
+ --------
+ str.lstrip
+
+ Examples
+ --------
+ >>> c = np.array(['aAaAaA', ' aA ', 'abBABba'])
+ >>> c
+ array(['aAaAaA', ' aA ', 'abBABba'],
+ dtype='|S7')
+
+ The 'a' in c[1] is not stripped because whitespace precedes it.
+
+ >>> np.char.lstrip(c, 'a')
+ array(['AaAaA', ' aA ', 'bBABba'],
+ dtype='|S7')
+
+ >>> np.char.lstrip(c, 'A') # leaves c unchanged
+ array(['aAaAaA', ' aA ', 'abBABba'],
+ dtype='|S7')
+ >>> (np.char.lstrip(c, ' ') == np.char.lstrip(c, '')).all()
+ ... # XXX: is this a regression? this line now returns False
+ ... # np.char.lstrip(c,'') does not modify c at all.
+ True
+ >>> (np.char.lstrip(c, ' ') == np.char.lstrip(c, None)).all()
+ True
+
+ """
+ a_arr = numpy.asarray(a)
+ return _vec_string(a_arr, a_arr.dtype, 'lstrip', (chars,))
+
+
+def _partition_dispatcher(a, sep):
+ return (a,)
+
+
+@array_function_dispatch(_partition_dispatcher)
+def partition(a, sep):
+ """
+ Partition each element in `a` around `sep`.
+
+ Calls `str.partition` element-wise.
+
+    For each element in `a`, split the element at the first
+    occurrence of `sep`, and return 3 strings containing the part
+ before the separator, the separator itself, and the part after
+ the separator. If the separator is not found, return 3 strings
+ containing the string itself, followed by two empty strings.
+
+ Parameters
+ ----------
+ a : array_like, {str, unicode}
+ Input array
+ sep : {str, unicode}
+ Separator to split each string element in `a`.
+
+ Returns
+ -------
+ out : ndarray, {str, unicode}
+ Output array of str or unicode, depending on input type.
+ The output array will have an extra dimension with 3
+ elements per input element.
+
+ See also
+ --------
+ str.partition
+
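+    Examples
+    --------
+    A minimal usage sketch; the output shown assumes a Python 2
+    (byte-string) build of NumPy:
+
+    >>> np.char.partition(np.array(['a-b', 'c']), '-')
+    array([['a', '-', 'b'],
+           ['c', '', '']], dtype='|S1')
+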
+ """
+ return _to_string_or_unicode_array(
+ _vec_string(a, object_, 'partition', (sep,)))
+
+
+def _replace_dispatcher(a, old, new, count=None):
+ return (a,)
+
+
+@array_function_dispatch(_replace_dispatcher)
+def replace(a, old, new, count=None):
+ """
+ For each element in `a`, return a copy of the string with all
+ occurrences of substring `old` replaced by `new`.
+
+ Calls `str.replace` element-wise.
+
+ Parameters
+ ----------
+ a : array-like of str or unicode
+
+ old, new : str or unicode
+
+ count : int, optional
+ If the optional argument `count` is given, only the first
+ `count` occurrences are replaced.
+
+ Returns
+ -------
+ out : ndarray
+ Output array of str or unicode, depending on input type
+
+ See also
+ --------
+ str.replace
+
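+    Examples
+    --------
+    A minimal usage sketch (Python 2 byte-string dtypes assumed); `count`
+    caps the number of replacements per element:
+
+    >>> np.char.replace(np.array(['aaa', 'aba']), 'a', 'b', count=1)
+    array(['baa', 'bba'], dtype='|S3')
+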
+ """
+ return _to_string_or_unicode_array(
+ _vec_string(
+ a, object_, 'replace', [old, new] + _clean_args(count)))
+
+
+@array_function_dispatch(_count_dispatcher)
+def rfind(a, sub, start=0, end=None):
+ """
+ For each element in `a`, return the highest index in the string
+ where substring `sub` is found, such that `sub` is contained
+ within [`start`, `end`].
+
+ Calls `str.rfind` element-wise.
+
+ Parameters
+ ----------
+ a : array-like of str or unicode
+
+ sub : str or unicode
+
+ start, end : int, optional
+ Optional arguments `start` and `end` are interpreted as in
+ slice notation.
+
+ Returns
+ -------
+ out : ndarray
+ Output array of ints. Return -1 on failure.
+
+ See also
+ --------
+ str.rfind
+
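+    Examples
+    --------
+    A minimal usage sketch; -1 marks elements without a match:
+
+    >>> np.char.rfind(np.array(['abcba', 'xyz']), 'b')
+    array([ 3, -1])
+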
+ """
+ return _vec_string(
+ a, integer, 'rfind', [sub, start] + _clean_args(end))
+
+
+@array_function_dispatch(_count_dispatcher)
+def rindex(a, sub, start=0, end=None):
+ """
+ Like `rfind`, but raises `ValueError` when the substring `sub` is
+ not found.
+
+ Calls `str.rindex` element-wise.
+
+ Parameters
+ ----------
+ a : array-like of str or unicode
+
+ sub : str or unicode
+
+ start, end : int, optional
+
+ Returns
+ -------
+ out : ndarray
+ Output array of ints.
+
+ See also
+ --------
+ rfind, str.rindex
+
+ """
+ return _vec_string(
+ a, integer, 'rindex', [sub, start] + _clean_args(end))
+
+
+@array_function_dispatch(_just_dispatcher)
+def rjust(a, width, fillchar=' '):
+ """
+ Return an array with the elements of `a` right-justified in a
+ string of length `width`.
+
+ Calls `str.rjust` element-wise.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ width : int
+ The length of the resulting strings
+ fillchar : str or unicode, optional
+ The character to use for padding
+
+ Returns
+ -------
+ out : ndarray
+ Output array of str or unicode, depending on input type
+
+ See also
+ --------
+ str.rjust
+
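+    Examples
+    --------
+    A minimal usage sketch (Python 2 byte-string dtypes assumed):
+
+    >>> np.char.rjust(np.array(['a', 'bb']), 4, fillchar='*')
+    array(['***a', '**bb'], dtype='|S4')
+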
+ """
+ a_arr = numpy.asarray(a)
+ width_arr = numpy.asarray(width)
+ size = long(numpy.max(width_arr.flat))
+ if numpy.issubdtype(a_arr.dtype, numpy.string_):
+ fillchar = asbytes(fillchar)
+ return _vec_string(
+ a_arr, (a_arr.dtype.type, size), 'rjust', (width_arr, fillchar))
+
+
+@array_function_dispatch(_partition_dispatcher)
+def rpartition(a, sep):
+ """
+ Partition (split) each element around the right-most separator.
+
+ Calls `str.rpartition` element-wise.
+
+    For each element in `a`, split the element at the last
+    occurrence of `sep`, and return 3 strings containing the part
+    before the separator, the separator itself, and the part after
+    the separator. If the separator is not found, return 3 strings
+    containing two empty strings, followed by the string itself.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+ Input array
+ sep : str or unicode
+ Right-most separator to split each element in array.
+
+ Returns
+ -------
+ out : ndarray
+ Output array of string or unicode, depending on input
+ type. The output array will have an extra dimension with
+ 3 elements per input element.
+
+ See also
+ --------
+ str.rpartition
+
+ """
+ return _to_string_or_unicode_array(
+ _vec_string(a, object_, 'rpartition', (sep,)))
+
+
+def _split_dispatcher(a, sep=None, maxsplit=None):
+ return (a,)
+
+
+@array_function_dispatch(_split_dispatcher)
+def rsplit(a, sep=None, maxsplit=None):
+ """
+ For each element in `a`, return a list of the words in the
+ string, using `sep` as the delimiter string.
+
+ Calls `str.rsplit` element-wise.
+
+ Except for splitting from the right, `rsplit`
+ behaves like `split`.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ sep : str or unicode, optional
+ If `sep` is not specified or `None`, any whitespace string
+ is a separator.
+ maxsplit : int, optional
+ If `maxsplit` is given, at most `maxsplit` splits are done,
+ the rightmost ones.
+
+ Returns
+ -------
+ out : ndarray
+ Array of list objects
+
+ See also
+ --------
+ str.rsplit, split
+
+ """
+ # This will return an array of lists of different sizes, so we
+ # leave it as an object array
+ return _vec_string(
+ a, object_, 'rsplit', [sep] + _clean_args(maxsplit))
+
+
+def _strip_dispatcher(a, chars=None):
+ return (a,)
+
+
+@array_function_dispatch(_strip_dispatcher)
+def rstrip(a, chars=None):
+ """
+ For each element in `a`, return a copy with the trailing
+ characters removed.
+
+ Calls `str.rstrip` element-wise.
+
+ Parameters
+ ----------
+ a : array-like of str or unicode
+
+ chars : str or unicode, optional
+ The `chars` argument is a string specifying the set of
+ characters to be removed. If omitted or None, the `chars`
+ argument defaults to removing whitespace. The `chars` argument
+ is not a suffix; rather, all combinations of its values are
+ stripped.
+
+ Returns
+ -------
+ out : ndarray
+ Output array of str or unicode, depending on input type
+
+ See also
+ --------
+ str.rstrip
+
+ Examples
+ --------
+ >>> c = np.array(['aAaAaA', 'abBABba'], dtype='S7'); c
+ array(['aAaAaA', 'abBABba'],
+ dtype='|S7')
+ >>> np.char.rstrip(c, 'a')
+ array(['aAaAaA', 'abBABb'],
+ dtype='|S7')
+ >>> np.char.rstrip(c, 'A')
+ array(['aAaAa', 'abBABba'],
+ dtype='|S7')
+
+ """
+ a_arr = numpy.asarray(a)
+ return _vec_string(a_arr, a_arr.dtype, 'rstrip', (chars,))
+
+
+@array_function_dispatch(_split_dispatcher)
+def split(a, sep=None, maxsplit=None):
+ """
+ For each element in `a`, return a list of the words in the
+ string, using `sep` as the delimiter string.
+
+ Calls `str.split` element-wise.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ sep : str or unicode, optional
+ If `sep` is not specified or `None`, any whitespace string is a
+ separator.
+
+ maxsplit : int, optional
+ If `maxsplit` is given, at most `maxsplit` splits are done.
+
+ Returns
+ -------
+ out : ndarray
+ Array of list objects
+
+ See also
+ --------
+ str.split, rsplit
+
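+    Examples
+    --------
+    A minimal usage sketch; because the per-element lists may differ in
+    length, the result is an object array, shown here via ``tolist()``:
+
+    >>> np.char.split(np.array(['a b c', 'd e'])).tolist()
+    [['a', 'b', 'c'], ['d', 'e']]
+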
+ """
+ # This will return an array of lists of different sizes, so we
+ # leave it as an object array
+ return _vec_string(
+ a, object_, 'split', [sep] + _clean_args(maxsplit))
+
+
+def _splitlines_dispatcher(a, keepends=None):
+ return (a,)
+
+
+@array_function_dispatch(_splitlines_dispatcher)
+def splitlines(a, keepends=None):
+ """
+ For each element in `a`, return a list of the lines in the
+ element, breaking at line boundaries.
+
+ Calls `str.splitlines` element-wise.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ keepends : bool, optional
+ Line breaks are not included in the resulting list unless
+ keepends is given and true.
+
+ Returns
+ -------
+ out : ndarray
+ Array of list objects
+
+ See also
+ --------
+ str.splitlines
+
+ """
+ return _vec_string(
+ a, object_, 'splitlines', _clean_args(keepends))
+
+
+def _startswith_dispatcher(a, prefix, start=None, end=None):
+ return (a,)
+
+
+@array_function_dispatch(_startswith_dispatcher)
+def startswith(a, prefix, start=0, end=None):
+ """
+ Returns a boolean array which is `True` where the string element
+ in `a` starts with `prefix`, otherwise `False`.
+
+ Calls `str.startswith` element-wise.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ prefix : str
+
+ start, end : int, optional
+ With optional `start`, test beginning at that position. With
+ optional `end`, stop comparing at that position.
+
+ Returns
+ -------
+ out : ndarray
+ Array of booleans
+
+ See also
+ --------
+ str.startswith
+
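+    Examples
+    --------
+    A minimal usage sketch (boolean repr assumes NumPy >= 1.14 printing):
+
+    >>> np.char.startswith(np.array(['foo', 'bar']), 'f')
+    array([ True, False])
+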
+ """
+ return _vec_string(
+ a, bool_, 'startswith', [prefix, start] + _clean_args(end))
+
+
+@array_function_dispatch(_strip_dispatcher)
+def strip(a, chars=None):
+ """
+ For each element in `a`, return a copy with the leading and
+ trailing characters removed.
+
+ Calls `str.strip` element-wise.
+
+ Parameters
+ ----------
+ a : array-like of str or unicode
+
+ chars : str or unicode, optional
+ The `chars` argument is a string specifying the set of
+ characters to be removed. If omitted or None, the `chars`
+ argument defaults to removing whitespace. The `chars` argument
+ is not a prefix or suffix; rather, all combinations of its
+ values are stripped.
+
+ Returns
+ -------
+ out : ndarray
+ Output array of str or unicode, depending on input type
+
+ See also
+ --------
+ str.strip
+
+ Examples
+ --------
+ >>> c = np.array(['aAaAaA', ' aA ', 'abBABba'])
+ >>> c
+ array(['aAaAaA', ' aA ', 'abBABba'],
+ dtype='|S7')
+ >>> np.char.strip(c)
+ array(['aAaAaA', 'aA', 'abBABba'],
+ dtype='|S7')
+    >>> np.char.strip(c, 'a')  # 'a' not stripped from c[1] because of leading whitespace
+ array(['AaAaA', ' aA ', 'bBABb'],
+ dtype='|S7')
+    >>> np.char.strip(c, 'A')  # 'A' not stripped from c[1] because of trailing whitespace
+ array(['aAaAa', ' aA ', 'abBABba'],
+ dtype='|S7')
+
+ """
+ a_arr = numpy.asarray(a)
+ return _vec_string(a_arr, a_arr.dtype, 'strip', _clean_args(chars))
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def swapcase(a):
+ """
+    For each element, return a copy of the string with uppercase
+    characters converted to lowercase and vice versa.
+
+ Calls `str.swapcase` element-wise.
+
+ For 8-bit strings, this method is locale-dependent.
+
+ Parameters
+ ----------
+ a : array_like, {str, unicode}
+ Input array.
+
+ Returns
+ -------
+ out : ndarray, {str, unicode}
+ Output array of str or unicode, depending on input type
+
+ See also
+ --------
+ str.swapcase
+
+ Examples
+ --------
+ >>> c=np.array(['a1B c','1b Ca','b Ca1','cA1b'],'S5'); c
+ array(['a1B c', '1b Ca', 'b Ca1', 'cA1b'],
+ dtype='|S5')
+ >>> np.char.swapcase(c)
+ array(['A1b C', '1B cA', 'B cA1', 'Ca1B'],
+ dtype='|S5')
+
+ """
+ a_arr = numpy.asarray(a)
+ return _vec_string(a_arr, a_arr.dtype, 'swapcase')
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def title(a):
+ """
+    Return an element-wise title-cased version of each string or unicode.
+
+    In title case, words start with uppercase characters and all remaining
+    cased characters are lowercase.
+
+ Calls `str.title` element-wise.
+
+ For 8-bit strings, this method is locale-dependent.
+
+ Parameters
+ ----------
+ a : array_like, {str, unicode}
+ Input array.
+
+ Returns
+ -------
+ out : ndarray
+ Output array of str or unicode, depending on input type
+
+ See also
+ --------
+ str.title
+
+ Examples
+ --------
+ >>> c=np.array(['a1b c','1b ca','b ca1','ca1b'],'S5'); c
+ array(['a1b c', '1b ca', 'b ca1', 'ca1b'],
+ dtype='|S5')
+ >>> np.char.title(c)
+ array(['A1B C', '1B Ca', 'B Ca1', 'Ca1B'],
+ dtype='|S5')
+
+ """
+ a_arr = numpy.asarray(a)
+ return _vec_string(a_arr, a_arr.dtype, 'title')
+
+
+def _translate_dispatcher(a, table, deletechars=None):
+ return (a,)
+
+
+@array_function_dispatch(_translate_dispatcher)
+def translate(a, table, deletechars=None):
+ """
+ For each element in `a`, return a copy of the string where all
+ characters occurring in the optional argument `deletechars` are
+ removed, and the remaining characters have been mapped through the
+ given translation table.
+
+ Calls `str.translate` element-wise.
+
+ Parameters
+ ----------
+ a : array-like of str or unicode
+
+ table : str of length 256
+
+ deletechars : str
+
+ Returns
+ -------
+ out : ndarray
+ Output array of str or unicode, depending on input type
+
+ See also
+ --------
+ str.translate
+
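+    Examples
+    --------
+    A minimal usage sketch for byte strings (Python 2), where the
+    256-character table comes from `string.maketrans`:
+
+    >>> from string import maketrans
+    >>> np.char.translate(np.array(['abc']), maketrans('abc', 'xyz'))
+    array(['xyz'], dtype='|S3')
+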
+ """
+ a_arr = numpy.asarray(a)
+ if issubclass(a_arr.dtype.type, unicode_):
+ return _vec_string(
+ a_arr, a_arr.dtype, 'translate', (table,))
+ else:
+ return _vec_string(
+ a_arr, a_arr.dtype, 'translate', [table] + _clean_args(deletechars))
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def upper(a):
+ """
+ Return an array with the elements converted to uppercase.
+
+ Calls `str.upper` element-wise.
+
+ For 8-bit strings, this method is locale-dependent.
+
+ Parameters
+ ----------
+ a : array_like, {str, unicode}
+ Input array.
+
+ Returns
+ -------
+ out : ndarray, {str, unicode}
+ Output array of str or unicode, depending on input type
+
+ See also
+ --------
+ str.upper
+
+ Examples
+ --------
+ >>> c = np.array(['a1b c', '1bca', 'bca1']); c
+ array(['a1b c', '1bca', 'bca1'],
+ dtype='|S5')
+ >>> np.char.upper(c)
+ array(['A1B C', '1BCA', 'BCA1'],
+ dtype='|S5')
+
+ """
+ a_arr = numpy.asarray(a)
+ return _vec_string(a_arr, a_arr.dtype, 'upper')
+
+
+def _zfill_dispatcher(a, width):
+ return (a,)
+
+
+@array_function_dispatch(_zfill_dispatcher)
+def zfill(a, width):
+ """
+    Return the numeric string left-filled with zeros.
+
+ Calls `str.zfill` element-wise.
+
+ Parameters
+ ----------
+ a : array_like, {str, unicode}
+ Input array.
+ width : int
+ Width of string to left-fill elements in `a`.
+
+ Returns
+ -------
+ out : ndarray, {str, unicode}
+ Output array of str or unicode, depending on input type
+
+ See also
+ --------
+ str.zfill
+
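+    Examples
+    --------
+    A minimal usage sketch; a sign prefix is handled as in `str.zfill`:
+
+    >>> np.char.zfill(np.array(['5', '-5']), 3)
+    array(['005', '-05'], dtype='|S3')
+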
+ """
+ a_arr = numpy.asarray(a)
+ width_arr = numpy.asarray(width)
+ size = long(numpy.max(width_arr.flat))
+ return _vec_string(
+ a_arr, (a_arr.dtype.type, size), 'zfill', (width_arr,))
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def isnumeric(a):
+ """
+ For each element, return True if there are only numeric
+ characters in the element.
+
+ Calls `unicode.isnumeric` element-wise.
+
+ Numeric characters include digit characters, and all characters
+ that have the Unicode numeric value property, e.g. ``U+2155,
+ VULGAR FRACTION ONE FIFTH``.
+
+ Parameters
+ ----------
+ a : array_like, unicode
+ Input array.
+
+ Returns
+ -------
+ out : ndarray, bool
+ Array of booleans of same shape as `a`.
+
+ See also
+ --------
+ unicode.isnumeric
+
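+    Examples
+    --------
+    A minimal usage sketch; note that byte-string input raises TypeError:
+
+    >>> np.char.isnumeric(np.array([u'123', u'1a']))
+    array([ True, False])
+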
+ """
+ if _use_unicode(a) != unicode_:
+ raise TypeError("isnumeric is only available for Unicode strings and arrays")
+ return _vec_string(a, bool_, 'isnumeric')
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def isdecimal(a):
+ """
+ For each element, return True if there are only decimal
+ characters in the element.
+
+ Calls `unicode.isdecimal` element-wise.
+
+ Decimal characters include digit characters, and all characters
+    that can be used to form decimal-radix numbers,
+ e.g. ``U+0660, ARABIC-INDIC DIGIT ZERO``.
+
+ Parameters
+ ----------
+ a : array_like, unicode
+ Input array.
+
+ Returns
+ -------
+ out : ndarray, bool
+ Array of booleans identical in shape to `a`.
+
+ See also
+ --------
+ unicode.isdecimal
+
+ """
+ if _use_unicode(a) != unicode_:
+ raise TypeError("isnumeric is only available for Unicode strings and arrays")
+ return _vec_string(a, bool_, 'isdecimal')
+
+
+@set_module('numpy')
+class chararray(ndarray):
+ """
+ chararray(shape, itemsize=1, unicode=False, buffer=None, offset=0,
+ strides=None, order=None)
+
+ Provides a convenient view on arrays of string and unicode values.
+
+ .. note::
+ The `chararray` class exists for backwards compatibility with
+        Numarray; it is not recommended for new development. Starting from numpy
+ 1.4, if one needs arrays of strings, it is recommended to use arrays of
+ `dtype` `object_`, `string_` or `unicode_`, and use the free functions
+ in the `numpy.char` module for fast vectorized string operations.
+
+ Versus a regular NumPy array of type `str` or `unicode`, this
+ class adds the following functionality:
+
+ 1) values automatically have whitespace removed from the end
+ when indexed
+
+ 2) comparison operators automatically remove whitespace from the
+ end when comparing values
+
+ 3) vectorized string operations are provided as methods
+ (e.g. `.endswith`) and infix operators (e.g. ``"+", "*", "%"``)
+
+ chararrays should be created using `numpy.char.array` or
+ `numpy.char.asarray`, rather than this constructor directly.
+
+ This constructor creates the array, using `buffer` (with `offset`
+ and `strides`) if it is not ``None``. If `buffer` is ``None``, then
+ constructs a new array with `strides` in "C order", unless both
+ ``len(shape) >= 2`` and ``order='Fortran'``, in which case `strides`
+ is in "Fortran order".
+
+ Methods
+ -------
+ astype
+ argsort
+ copy
+ count
+ decode
+ dump
+ dumps
+ encode
+ endswith
+ expandtabs
+ fill
+ find
+ flatten
+ getfield
+ index
+ isalnum
+ isalpha
+ isdecimal
+ isdigit
+ islower
+ isnumeric
+ isspace
+ istitle
+ isupper
+ item
+ join
+ ljust
+ lower
+ lstrip
+ nonzero
+ put
+ ravel
+ repeat
+ replace
+ reshape
+ resize
+ rfind
+ rindex
+ rjust
+ rsplit
+ rstrip
+ searchsorted
+ setfield
+ setflags
+ sort
+ split
+ splitlines
+ squeeze
+ startswith
+ strip
+ swapaxes
+ swapcase
+ take
+ title
+ tofile
+ tolist
+ tostring
+ translate
+ transpose
+ upper
+ view
+ zfill
+
+ Parameters
+ ----------
+ shape : tuple
+ Shape of the array.
+ itemsize : int, optional
+ Length of each array element, in number of characters. Default is 1.
+ unicode : bool, optional
+        Whether the array elements are of type unicode (True) or string (False).
+ Default is False.
+ buffer : int, optional
+ Memory address of the start of the array data. Default is None,
+ in which case a new array is created.
+ offset : int, optional
+        Offset of the array data from the beginning of `buffer`, in bytes.
+        Default is 0. Needs to be >= 0.
+ strides : array_like of ints, optional
+ Strides for the array (see `ndarray.strides` for full description).
+ Default is None.
+ order : {'C', 'F'}, optional
+ The order in which the array data is stored in memory: 'C' ->
+ "row major" order (the default), 'F' -> "column major"
+ (Fortran) order.
+
+ Examples
+ --------
+ >>> charar = np.chararray((3, 3))
+ >>> charar[:] = 'a'
+ >>> charar
+ chararray([['a', 'a', 'a'],
+ ['a', 'a', 'a'],
+ ['a', 'a', 'a']],
+ dtype='|S1')
+
+ >>> charar = np.chararray(charar.shape, itemsize=5)
+ >>> charar[:] = 'abc'
+ >>> charar
+ chararray([['abc', 'abc', 'abc'],
+ ['abc', 'abc', 'abc'],
+ ['abc', 'abc', 'abc']],
+ dtype='|S5')
+
+ """
+ def __new__(subtype, shape, itemsize=1, unicode=False, buffer=None,
+ offset=0, strides=None, order='C'):
+ global _globalvar
+
+
+ else:
+ dtype = string_
+
+ # force itemsize to be a Python long, since using NumPy integer
+ # types results in itemsize.itemsize being used as the size of
+ # strings in the new array.
+ itemsize = long(itemsize)
+
+ if sys.version_info[0] >= 3 and isinstance(buffer, _unicode):
+ # On Py3, unicode objects do not have the buffer interface
+ filler = buffer
+ buffer = None
+ else:
+ filler = None
+
+ _globalvar = 1
+ if buffer is None:
+ self = ndarray.__new__(subtype, shape, (dtype, itemsize),
+ order=order)
+ else:
+ self = ndarray.__new__(subtype, shape, (dtype, itemsize),
+ buffer=buffer,
+ offset=offset, strides=strides,
+ order=order)
+ if filler is not None:
+ self[...] = filler
+ _globalvar = 0
+ return self
+
+ def __array_finalize__(self, obj):
+        # The 'b' dtype is a special case because it is used for reconstructing.
+ if not _globalvar and self.dtype.char not in 'SUbc':
+ raise ValueError("Can only create a chararray from string data.")
+
+ def __getitem__(self, obj):
+ val = ndarray.__getitem__(self, obj)
+
+ if isinstance(val, character):
+ temp = val.rstrip()
+ if _len(temp) == 0:
+ val = ''
+ else:
+ val = temp
+
+ return val
+
+ # IMPLEMENTATION NOTE: Most of the methods of this class are
+ # direct delegations to the free functions in this module.
+ # However, those that return an array of strings should instead
+ # return a chararray, so some extra wrapping is required.
+
+ def __eq__(self, other):
+ """
+ Return (self == other) element-wise.
+
+ See also
+ --------
+ equal
+ """
+ return equal(self, other)
+
+ def __ne__(self, other):
+ """
+ Return (self != other) element-wise.
+
+ See also
+ --------
+ not_equal
+ """
+ return not_equal(self, other)
+
+ def __ge__(self, other):
+ """
+ Return (self >= other) element-wise.
+
+ See also
+ --------
+ greater_equal
+ """
+ return greater_equal(self, other)
+
+ def __le__(self, other):
+ """
+ Return (self <= other) element-wise.
+
+ See also
+ --------
+ less_equal
+ """
+ return less_equal(self, other)
+
+ def __gt__(self, other):
+ """
+ Return (self > other) element-wise.
+
+ See also
+ --------
+ greater
+ """
+ return greater(self, other)
+
+ def __lt__(self, other):
+ """
+ Return (self < other) element-wise.
+
+ See also
+ --------
+ less
+ """
+ return less(self, other)
+
+ def __add__(self, other):
+ """
+ Return (self + other), that is string concatenation,
+ element-wise for a pair of array_likes of str or unicode.
+
+ See also
+ --------
+ add
+ """
+ return asarray(add(self, other))
+
+ def __radd__(self, other):
+ """
+ Return (other + self), that is string concatenation,
+ element-wise for a pair of array_likes of `string_` or `unicode_`.
+
+ See also
+ --------
+ add
+ """
+ return asarray(add(numpy.asarray(other), self))
+
+ def __mul__(self, i):
+ """
+ Return (self * i), that is string multiple concatenation,
+ element-wise.
+
+ See also
+ --------
+ multiply
+ """
+ return asarray(multiply(self, i))
+
+ def __rmul__(self, i):
+ """
+ Return (self * i), that is string multiple concatenation,
+ element-wise.
+
+ See also
+ --------
+ multiply
+ """
+ return asarray(multiply(self, i))
+
+ def __mod__(self, i):
+ """
+ Return (self % i), that is pre-Python 2.6 string formatting
+        (interpolation), element-wise for a pair of array_likes of `string_`
+ or `unicode_`.
+
+ See also
+ --------
+ mod
+ """
+ return asarray(mod(self, i))
+
+ def __rmod__(self, other):
+ return NotImplemented
+
+ def argsort(self, axis=-1, kind='quicksort', order=None):
+ """
+ Return the indices that sort the array lexicographically.
+
+ For full documentation see `numpy.argsort`, for which this method is
+ in fact merely a "thin wrapper."
+
+ Examples
+ --------
+ >>> c = np.array(['a1b c', '1b ca', 'b ca1', 'Ca1b'], 'S5')
+ >>> c = c.view(np.chararray); c
+ chararray(['a1b c', '1b ca', 'b ca1', 'Ca1b'],
+ dtype='|S5')
+ >>> c[c.argsort()]
+ chararray(['1b ca', 'Ca1b', 'a1b c', 'b ca1'],
+ dtype='|S5')
+
+ """
+ return self.__array__().argsort(axis, kind, order)
+ argsort.__doc__ = ndarray.argsort.__doc__
+
+ def capitalize(self):
+ """
+ Return a copy of `self` with only the first character of each element
+ capitalized.
+
+ See also
+ --------
+ char.capitalize
+
+ """
+ return asarray(capitalize(self))
+
+ def center(self, width, fillchar=' '):
+ """
+ Return a copy of `self` with its elements centered in a
+ string of length `width`.
+
+ See also
+ --------
+ center
+ """
+ return asarray(center(self, width, fillchar))
+
+ def count(self, sub, start=0, end=None):
+ """
+ Returns an array with the number of non-overlapping occurrences of
+ substring `sub` in the range [`start`, `end`].
+
+ See also
+ --------
+ char.count
+
+ """
+ return count(self, sub, start, end)
+
+ def decode(self, encoding=None, errors=None):
+ """
+ Calls `str.decode` element-wise.
+
+ See also
+ --------
+ char.decode
+
+ """
+ return decode(self, encoding, errors)
+
+ def encode(self, encoding=None, errors=None):
+ """
+ Calls `str.encode` element-wise.
+
+ See also
+ --------
+ char.encode
+
+ """
+ return encode(self, encoding, errors)
+
+ def endswith(self, suffix, start=0, end=None):
+ """
+ Returns a boolean array which is `True` where the string element
+ in `self` ends with `suffix`, otherwise `False`.
+
+ See also
+ --------
+ char.endswith
+
+ """
+ return endswith(self, suffix, start, end)
+
+ def expandtabs(self, tabsize=8):
+ """
+ Return a copy of each string element where all tab characters are
+ replaced by one or more spaces.
+
+ See also
+ --------
+ char.expandtabs
+
+ """
+ return asarray(expandtabs(self, tabsize))
+
+ def find(self, sub, start=0, end=None):
+ """
+ For each element, return the lowest index in the string where
+ substring `sub` is found.
+
+ See also
+ --------
+ char.find
+
+ """
+ return find(self, sub, start, end)
+
+ def index(self, sub, start=0, end=None):
+ """
+ Like `find`, but raises `ValueError` when the substring is not found.
+
+ See also
+ --------
+ char.index
+
+ """
+ return index(self, sub, start, end)
+
+ def isalnum(self):
+ """
+ Returns true for each element if all characters in the string
+ are alphanumeric and there is at least one character, false
+ otherwise.
+
+ See also
+ --------
+ char.isalnum
+
+ """
+ return isalnum(self)
+
+ def isalpha(self):
+ """
+ Returns true for each element if all characters in the string
+ are alphabetic and there is at least one character, false
+ otherwise.
+
+ See also
+ --------
+ char.isalpha
+
+ """
+ return isalpha(self)
+
+ def isdigit(self):
+ """
+ Returns true for each element if all characters in the string are
+ digits and there is at least one character, false otherwise.
+
+ See also
+ --------
+ char.isdigit
+
+ """
+ return isdigit(self)
+
+ def islower(self):
+ """
+ Returns true for each element if all cased characters in the
+ string are lowercase and there is at least one cased character,
+ false otherwise.
+
+ See also
+ --------
+ char.islower
+
+ """
+ return islower(self)
+
+ def isspace(self):
+ """
+ Returns true for each element if there are only whitespace
+ characters in the string and there is at least one character,
+ false otherwise.
+
+ See also
+ --------
+ char.isspace
+
+ """
+ return isspace(self)
+
+ def istitle(self):
+ """
+ Returns true for each element if the element is a titlecased
+ string and there is at least one character, false otherwise.
+
+ See also
+ --------
+ char.istitle
+
+ """
+ return istitle(self)
+
+ def isupper(self):
+ """
+ Returns true for each element if all cased characters in the
+ string are uppercase and there is at least one character, false
+ otherwise.
+
+ See also
+ --------
+ char.isupper
+
+ """
+ return isupper(self)
+
+ def join(self, seq):
+ """
+ Return a string which is the concatenation of the strings in the
+ sequence `seq`.
+
+ See also
+ --------
+ char.join
+
+ """
+ return join(self, seq)
+
+ def ljust(self, width, fillchar=' '):
+ """
+ Return an array with the elements of `self` left-justified in a
+ string of length `width`.
+
+ See also
+ --------
+ char.ljust
+
+ """
+ return asarray(ljust(self, width, fillchar))
+
+ def lower(self):
+ """
+ Return an array with the elements of `self` converted to
+ lowercase.
+
+ See also
+ --------
+ char.lower
+
+ """
+ return asarray(lower(self))
+
+ def lstrip(self, chars=None):
+ """
+ For each element in `self`, return a copy with the leading characters
+ removed.
+
+ See also
+ --------
+ char.lstrip
+
+ """
+ return asarray(lstrip(self, chars))
+
+ def partition(self, sep):
+ """
+ Partition each element in `self` around `sep`.
+
+ See also
+ --------
+ partition
+ """
+ return asarray(partition(self, sep))
+
+ def replace(self, old, new, count=None):
+ """
+ For each element in `self`, return a copy of the string with all
+ occurrences of substring `old` replaced by `new`.
+
+ See also
+ --------
+ char.replace
+
+ """
+ return asarray(replace(self, old, new, count))
+
+ def rfind(self, sub, start=0, end=None):
+ """
+ For each element in `self`, return the highest index in the string
+ where substring `sub` is found, such that `sub` is contained
+ within [`start`, `end`].
+
+ See also
+ --------
+ char.rfind
+
+ """
+ return rfind(self, sub, start, end)
+
+ def rindex(self, sub, start=0, end=None):
+ """
+ Like `rfind`, but raises `ValueError` when the substring `sub` is
+ not found.
+
+ See also
+ --------
+ char.rindex
+
+ """
+ return rindex(self, sub, start, end)
+
+ def rjust(self, width, fillchar=' '):
+ """
+ Return an array with the elements of `self`
+ right-justified in a string of length `width`.
+
+ See also
+ --------
+ char.rjust
+
+ """
+ return asarray(rjust(self, width, fillchar))
+
+ def rpartition(self, sep):
+ """
+ Partition each element in `self` around `sep`.
+
+ See also
+ --------
+ rpartition
+ """
+ return asarray(rpartition(self, sep))
+
+ def rsplit(self, sep=None, maxsplit=None):
+ """
+ For each element in `self`, return a list of the words in
+ the string, using `sep` as the delimiter string.
+
+ See also
+ --------
+ char.rsplit
+
+ """
+ return rsplit(self, sep, maxsplit)
+
+ def rstrip(self, chars=None):
+ """
+ For each element in `self`, return a copy with the trailing
+ characters removed.
+
+ See also
+ --------
+ char.rstrip
+
+ """
+ return asarray(rstrip(self, chars))
+
+ def split(self, sep=None, maxsplit=None):
+ """
+ For each element in `self`, return a list of the words in the
+ string, using `sep` as the delimiter string.
+
+ See also
+ --------
+ char.split
+
+ """
+ return split(self, sep, maxsplit)
+
+ def splitlines(self, keepends=None):
+ """
+ For each element in `self`, return a list of the lines in the
+ element, breaking at line boundaries.
+
+ See also
+ --------
+ char.splitlines
+
+ """
+ return splitlines(self, keepends)
+
+ def startswith(self, prefix, start=0, end=None):
+ """
+ Returns a boolean array which is `True` where the string element
+ in `self` starts with `prefix`, otherwise `False`.
+
+ See also
+ --------
+ char.startswith
+
+ """
+ return startswith(self, prefix, start, end)
+
+ def strip(self, chars=None):
+ """
+ For each element in `self`, return a copy with the leading and
+ trailing characters removed.
+
+ See also
+ --------
+ char.strip
+
+ """
+ return asarray(strip(self, chars))
+
+ def swapcase(self):
+ """
+ For each element in `self`, return a copy of the string with
+ uppercase characters converted to lowercase and vice versa.
+
+ See also
+ --------
+ char.swapcase
+
+ """
+ return asarray(swapcase(self))
+
+ def title(self):
+ """
+ For each element in `self`, return a titlecased version of the
+ string: words start with uppercase characters, all remaining cased
+ characters are lowercase.
+
+ See also
+ --------
+ char.title
+
+ """
+ return asarray(title(self))
+
+ def translate(self, table, deletechars=None):
+ """
+ For each element in `self`, return a copy of the string where
+ all characters occurring in the optional argument
+ `deletechars` are removed, and the remaining characters have
+ been mapped through the given translation table.
+
+ See also
+ --------
+ char.translate
+
+ """
+ return asarray(translate(self, table, deletechars))
+
+ def upper(self):
+ """
+ Return an array with the elements of `self` converted to
+ uppercase.
+
+ See also
+ --------
+ char.upper
+
+ """
+ return asarray(upper(self))
+
+ def zfill(self, width):
+ """
+ Return the numeric string left-filled with zeros in a string of
+ length `width`.
+
+ See also
+ --------
+ char.zfill
+
+ """
+ return asarray(zfill(self, width))
+
+ def isnumeric(self):
+ """
+ For each element in `self`, return True if there are only
+ numeric characters in the element.
+
+ See also
+ --------
+ char.isnumeric
+
+ """
+ return isnumeric(self)
+
+ def isdecimal(self):
+ """
+ For each element in `self`, return True if there are only
+ decimal characters in the element.
+
+ See also
+ --------
+ char.isdecimal
+
+ """
+ return isdecimal(self)
+
+
+def array(obj, itemsize=None, copy=True, unicode=None, order=None):
+ """
+ Create a `chararray`.
+
+ .. note::
+        The `chararray` class is provided for numarray backward-compatibility.
+ New code (not concerned with numarray compatibility) should use
+ arrays of type `string_` or `unicode_` and use the free functions
+        in :mod:`numpy.char` for fast
+ vectorized string operations instead.
+
+ Versus a regular NumPy array of type `str` or `unicode`, this
+ class adds the following functionality:
+
+ 1) values automatically have whitespace removed from the end
+ when indexed
+
+ 2) comparison operators automatically remove whitespace from the
+ end when comparing values
+
+ 3) vectorized string operations are provided as methods
+ (e.g. `str.endswith`) and infix operators (e.g. ``+, *, %``)
+
+ Parameters
+ ----------
+ obj : array of str or unicode-like
+
+ itemsize : int, optional
+ `itemsize` is the number of characters per scalar in the
+ resulting array. If `itemsize` is None, and `obj` is an
+ object array or a Python list, the `itemsize` will be
+ automatically determined. If `itemsize` is provided and `obj`
+ is of type str or unicode, then the `obj` string will be
+ chunked into `itemsize` pieces.
+
+ copy : bool, optional
+ If true (default), then the object is copied. Otherwise, a copy
+ will only be made if __array__ returns a copy, if obj is a
+ nested sequence, or if a copy is needed to satisfy any of the other
+ requirements (`itemsize`, unicode, `order`, etc.).
+
+ unicode : bool, optional
+ When true, the resulting `chararray` can contain Unicode
+ characters, when false only 8-bit characters. If unicode is
+ `None` and `obj` is one of the following:
+
+ - a `chararray`,
+ - an ndarray of type `str` or `unicode`
+ - a Python str or unicode object,
+
+ then the unicode setting of the output array will be
+ automatically determined.
+
+ order : {'C', 'F', 'A'}, optional
+ Specify the order of the array. If order is 'C' (default), then the
+ array will be in C-contiguous order (last-index varies the
+ fastest). If order is 'F', then the returned array
+ will be in Fortran-contiguous order (first-index varies the
+ fastest). If order is 'A', then the returned array may
+ be in any order (either C-, Fortran-contiguous, or even
+ discontiguous).
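+
+    Examples
+    --------
+    A minimal usage sketch; chararray comparisons ignore trailing
+    whitespace (boolean repr assumes NumPy >= 1.14 printing):
+
+    >>> np.char.array(['foo ', 'bar']) == 'foo'
+    array([ True, False])
+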
+ """
+ if isinstance(obj, (_bytes, _unicode)):
+ if unicode is None:
+ if isinstance(obj, _unicode):
+ unicode = True
+ else:
+ unicode = False
+
+ if itemsize is None:
+ itemsize = _len(obj)
+ shape = _len(obj) // itemsize
+
+ if unicode:
+ if sys.maxunicode == 0xffff:
+ # On a narrow Python build, the buffer for Unicode
+ # strings is UCS2, which doesn't match the buffer for
+ # NumPy Unicode types, which is ALWAYS UCS4.
+ # Therefore, we need to convert the buffer. On Python
+ # 2.6 and later, we can use the utf_32 codec. Earlier
+ # versions don't have that codec, so we convert to a
+ # numerical array that matches the input buffer, and
+ # then use NumPy to convert it to UCS4. All of this
+ # should happen in native endianness.
+ obj = obj.encode('utf_32')
+ else:
+ obj = _unicode(obj)
+ else:
+ # Let the default Unicode -> string encoding (if any) take
+ # precedence.
+ obj = _bytes(obj)
+
+ return chararray(shape, itemsize=itemsize, unicode=unicode,
+ buffer=obj, order=order)
+
+ if isinstance(obj, (list, tuple)):
+ obj = numpy.asarray(obj)
+
+ if isinstance(obj, ndarray) and issubclass(obj.dtype.type, character):
+ # If we just have a vanilla chararray, create a chararray
+ # view around it.
+ if not isinstance(obj, chararray):
+ obj = obj.view(chararray)
+
+ if itemsize is None:
+ itemsize = obj.itemsize
+ # itemsize is in 8-bit chars, so for Unicode, we need
+ # to divide by the size of a single Unicode character,
+ # which for NumPy is always 4
+ if issubclass(obj.dtype.type, unicode_):
+ itemsize //= 4
+
+ if unicode is None:
+ if issubclass(obj.dtype.type, unicode_):
+ unicode = True
+ else:
+ unicode = False
+
+ if unicode:
+ dtype = unicode_
+ else:
+ dtype = string_
+
+ if order is not None:
+ obj = numpy.asarray(obj, order=order)
+ if (copy or
+ (itemsize != obj.itemsize) or
+ (not unicode and isinstance(obj, unicode_)) or
+ (unicode and isinstance(obj, string_))):
+ obj = obj.astype((dtype, long(itemsize)))
+ return obj
+
+ if isinstance(obj, ndarray) and issubclass(obj.dtype.type, object):
+ if itemsize is None:
+ # Since no itemsize was specified, convert the input array to
+ # a list so the ndarray constructor will automatically
+ # determine the itemsize for us.
+ obj = obj.tolist()
+ # Fall through to the default case
+
+ if unicode:
+ dtype = unicode_
+ else:
+ dtype = string_
+
+ if itemsize is None:
+ val = narray(obj, dtype=dtype, order=order, subok=True)
+ else:
+ val = narray(obj, dtype=(dtype, itemsize), order=order, subok=True)
+ return val.view(chararray)
+
+
+def asarray(obj, itemsize=None, unicode=None, order=None):
+ """
+ Convert the input to a `chararray`, copying the data only if
+ necessary.
+
+ Versus a regular NumPy array of type `str` or `unicode`, this
+ class adds the following functionality:
+
+ 1) values automatically have whitespace removed from the end
+ when indexed
+
+ 2) comparison operators automatically remove whitespace from the
+ end when comparing values
+
+ 3) vectorized string operations are provided as methods
+       (e.g. `str.endswith`) and infix operators (e.g. ``+``, ``*``, ``%``)
+
+ Parameters
+ ----------
+ obj : array of str or unicode-like
+
+ itemsize : int, optional
+ `itemsize` is the number of characters per scalar in the
+ resulting array. If `itemsize` is None, and `obj` is an
+ object array or a Python list, the `itemsize` will be
+ automatically determined. If `itemsize` is provided and `obj`
+ is of type str or unicode, then the `obj` string will be
+ chunked into `itemsize` pieces.
+
+ unicode : bool, optional
+ When true, the resulting `chararray` can contain Unicode
+ characters, when false only 8-bit characters. If unicode is
+ `None` and `obj` is one of the following:
+
+ - a `chararray`,
+      - an ndarray of type `str` or `unicode`
+ - a Python str or unicode object,
+
+ then the unicode setting of the output array will be
+ automatically determined.
+
+ order : {'C', 'F'}, optional
+ Specify the order of the array. If order is 'C' (default), then the
+ array will be in C-contiguous order (last-index varies the
+ fastest). If order is 'F', then the returned array
+ will be in Fortran-contiguous order (first-index varies the
+ fastest).
+ """
+ return array(obj, itemsize, copy=False,
+ unicode=unicode, order=order)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/defchararray.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/defchararray.pyc
new file mode 100644
index 0000000..b1dcebe
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/defchararray.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/einsumfunc.py b/project/venv/lib/python2.7/site-packages/numpy/core/einsumfunc.py
new file mode 100644
index 0000000..c4fc77e
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/einsumfunc.py
@@ -0,0 +1,1422 @@
+"""
+Implementation of optimized einsum.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import itertools
+
+from numpy.compat import basestring
+from numpy.core.multiarray import c_einsum
+from numpy.core.numeric import asanyarray, tensordot
+from numpy.core.overrides import array_function_dispatch
+
+__all__ = ['einsum', 'einsum_path']
+
+einsum_symbols = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
+einsum_symbols_set = set(einsum_symbols)
+
+
+def _flop_count(idx_contraction, inner, num_terms, size_dictionary):
+ """
+ Computes the number of FLOPS in the contraction.
+
+ Parameters
+ ----------
+ idx_contraction : iterable
+ The indices involved in the contraction
+ inner : bool
+ Does this contraction require an inner product?
+ num_terms : int
+ The number of terms in a contraction
+ size_dictionary : dict
+ The size of each of the indices in idx_contraction
+
+ Returns
+ -------
+ flop_count : int
+ The total number of FLOPS required for the contraction.
+
+ Examples
+ --------
+
+ >>> _flop_count('abc', False, 1, {'a': 2, 'b':3, 'c':5})
+    30
+
+ >>> _flop_count('abc', True, 2, {'a': 2, 'b':3, 'c':5})
+    60
+
+ """
+
+ overall_size = _compute_size_by_dict(idx_contraction, size_dictionary)
+ op_factor = max(1, num_terms - 1)
+ if inner:
+ op_factor += 1
+
+ return overall_size * op_factor
+
+def _compute_size_by_dict(indices, idx_dict):
+ """
+ Computes the product of the elements in indices based on the dictionary
+ idx_dict.
+
+ Parameters
+ ----------
+ indices : iterable
+ Indices to base the product on.
+ idx_dict : dictionary
+ Dictionary of index sizes
+
+ Returns
+ -------
+ ret : int
+ The resulting product.
+
+ Examples
+ --------
+ >>> _compute_size_by_dict('abbc', {'a': 2, 'b':3, 'c':5})
+ 90
+
+ """
+ ret = 1
+ for i in indices:
+ ret *= idx_dict[i]
+ return ret
+
+
+def _find_contraction(positions, input_sets, output_set):
+ """
+ Finds the contraction for a given set of input and output sets.
+
+ Parameters
+ ----------
+ positions : iterable
+ Integer positions of terms used in the contraction.
+ input_sets : list
+ List of sets that represent the lhs side of the einsum subscript
+ output_set : set
+ Set that represents the rhs side of the overall einsum subscript
+
+ Returns
+ -------
+ new_result : set
+ The indices of the resulting contraction
+ remaining : list
+ List of sets that have not been contracted, the new set is appended to
+ the end of this list
+ idx_removed : set
+ Indices removed from the entire contraction
+ idx_contraction : set
+ The indices used in the current contraction
+
+ Examples
+ --------
+
+ # A simple dot product test case
+ >>> pos = (0, 1)
+ >>> isets = [set('ab'), set('bc')]
+ >>> oset = set('ac')
+ >>> _find_contraction(pos, isets, oset)
+ ({'a', 'c'}, [{'a', 'c'}], {'b'}, {'a', 'b', 'c'})
+
+ # A more complex case with additional terms in the contraction
+ >>> pos = (0, 2)
+ >>> isets = [set('abd'), set('ac'), set('bdc')]
+ >>> oset = set('ac')
+ >>> _find_contraction(pos, isets, oset)
+ ({'a', 'c'}, [{'a', 'c'}, {'a', 'c'}], {'b', 'd'}, {'a', 'b', 'c', 'd'})
+ """
+
+ idx_contract = set()
+ idx_remain = output_set.copy()
+ remaining = []
+ for ind, value in enumerate(input_sets):
+ if ind in positions:
+ idx_contract |= value
+ else:
+ remaining.append(value)
+ idx_remain |= value
+
+ new_result = idx_remain & idx_contract
+ idx_removed = (idx_contract - new_result)
+ remaining.append(new_result)
+
+ return (new_result, remaining, idx_removed, idx_contract)
+
+
+def _optimal_path(input_sets, output_set, idx_dict, memory_limit):
+ """
+ Computes all possible pair contractions, sieves the results based
+ on ``memory_limit`` and returns the lowest cost path. This algorithm
+    scales factorially with respect to the number of elements in the list
+    ``input_sets``.
+
+ Parameters
+ ----------
+ input_sets : list
+ List of sets that represent the lhs side of the einsum subscript
+ output_set : set
+ Set that represents the rhs side of the overall einsum subscript
+ idx_dict : dictionary
+ Dictionary of index sizes
+ memory_limit : int
+ The maximum number of elements in a temporary array
+
+ Returns
+ -------
+ path : list
+ The optimal contraction order within the memory limit constraint.
+
+ Examples
+ --------
+ >>> isets = [set('abd'), set('ac'), set('bdc')]
+ >>> oset = set()
+ >>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
+    >>> _optimal_path(isets, oset, idx_sizes, 5000)
+ [(0, 2), (0, 1)]
+ """
+
+ full_results = [(0, [], input_sets)]
+ for iteration in range(len(input_sets) - 1):
+ iter_results = []
+
+ # Compute all unique pairs
+ for curr in full_results:
+ cost, positions, remaining = curr
+ for con in itertools.combinations(range(len(input_sets) - iteration), 2):
+
+ # Find the contraction
+ cont = _find_contraction(con, remaining, output_set)
+ new_result, new_input_sets, idx_removed, idx_contract = cont
+
+ # Sieve the results based on memory_limit
+ new_size = _compute_size_by_dict(new_result, idx_dict)
+ if new_size > memory_limit:
+ continue
+
+ # Build (total_cost, positions, indices_remaining)
+ total_cost = cost + _flop_count(idx_contract, idx_removed, len(con), idx_dict)
+ new_pos = positions + [con]
+ iter_results.append((total_cost, new_pos, new_input_sets))
+
+ # Update combinatorial list, if we did not find anything return best
+ # path + remaining contractions
+ if iter_results:
+ full_results = iter_results
+ else:
+ path = min(full_results, key=lambda x: x[0])[1]
+ path += [tuple(range(len(input_sets) - iteration))]
+ return path
+
+ # If we have not found anything return single einsum contraction
+ if len(full_results) == 0:
+ return [tuple(range(len(input_sets)))]
+
+ path = min(full_results, key=lambda x: x[0])[1]
+ return path
+
+def _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit, path_cost, naive_cost):
+ """Compute the cost (removed size + flops) and resultant indices for
+ performing the contraction specified by ``positions``.
+
+ Parameters
+ ----------
+ positions : tuple of int
+ The locations of the proposed tensors to contract.
+ input_sets : list of sets
+ The indices found on each tensors.
+ output_set : set
+ The output indices of the expression.
+ idx_dict : dict
+ Mapping of each index to its size.
+ memory_limit : int
+ The total allowed size for an intermediary tensor.
+ path_cost : int
+ The contraction cost so far.
+ naive_cost : int
+ The cost of the unoptimized expression.
+
+ Returns
+ -------
+ cost : (int, int)
+ A tuple containing the size of any indices removed, and the flop cost.
+ positions : tuple of int
+ The locations of the proposed tensors to contract.
+ new_input_sets : list of sets
+ The resulting new list of indices if this proposed contraction is performed.
+
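+    Examples
+    --------
+    A minimal sketch of a matrix-multiplication-shaped contraction
+    (Python 2 set repr shown; element ordering within sets may vary):
+
+    >>> _parse_possible_contraction((0, 1), [set('ab'), set('bc')],
+    ...                             set('ac'), {'a': 2, 'b': 3, 'c': 4},
+    ...                             1000, 0, 10**9)
+    [(-10, 48), (0, 1), [set(['a', 'c'])]]
+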
+ """
+
+ # Find the contraction
+ contract = _find_contraction(positions, input_sets, output_set)
+ idx_result, new_input_sets, idx_removed, idx_contract = contract
+
+ # Sieve the results based on memory_limit
+ new_size = _compute_size_by_dict(idx_result, idx_dict)
+ if new_size > memory_limit:
+ return None
+
+ # Build sort tuple
+ old_sizes = (_compute_size_by_dict(input_sets[p], idx_dict) for p in positions)
+ removed_size = sum(old_sizes) - new_size
+
+ # NB: removed_size used to be just the size of any removed indices i.e.:
+ # helpers.compute_size_by_dict(idx_removed, idx_dict)
+ cost = _flop_count(idx_contract, idx_removed, len(positions), idx_dict)
+ sort = (-removed_size, cost)
+
+ # Sieve based on total cost as well
+ if (path_cost + cost) > naive_cost:
+ return None
+
+ # Add contraction to possible choices
+ return [sort, positions, new_input_sets]
+
+
+def _update_other_results(results, best):
+ """Update the positions and provisional input_sets of ``results`` based on
+ performing the contraction result ``best``. Remove any involving the tensors
+ contracted.
+
+ Parameters
+ ----------
+ results : list
+ List of contraction results produced by ``_parse_possible_contraction``.
+ best : list
+ The best contraction of ``results`` i.e. the one that will be performed.
+
+ Returns
+ -------
+ mod_results : list
+        The list of modified results, updated with the outcome of the ``best`` contraction.
+ """
+
+ best_con = best[1]
+ bx, by = best_con
+ mod_results = []
+
+ for cost, (x, y), con_sets in results:
+
+ # Ignore results involving tensors just contracted
+ if x in best_con or y in best_con:
+ continue
+
+ # Update the input_sets
+ del con_sets[by - int(by > x) - int(by > y)]
+ del con_sets[bx - int(bx > x) - int(bx > y)]
+ con_sets.insert(-1, best[2][-1])
+
+ # Update the position indices
+ mod_con = x - int(x > bx) - int(x > by), y - int(y > bx) - int(y > by)
+ mod_results.append((cost, mod_con, con_sets))
+
+ return mod_results
+
+def _greedy_path(input_sets, output_set, idx_dict, memory_limit):
+ """
+ Finds the path by contracting the best pair until the input list is
+ exhausted. The best pair is found by minimizing the tuple
+ ``(-prod(indices_removed), cost)``. What this amounts to is prioritizing
+    matrix multiplication or inner product operations, then Hadamard-like
+ operations, and finally outer operations. Outer products are limited by
+ ``memory_limit``. This algorithm scales cubically with respect to the
+ number of elements in the list ``input_sets``.
+
+ Parameters
+ ----------
+ input_sets : list
+ List of sets that represent the lhs side of the einsum subscript
+ output_set : set
+ Set that represents the rhs side of the overall einsum subscript
+ idx_dict : dictionary
+ Dictionary of index sizes
+    memory_limit : int
+ The maximum number of elements in a temporary array
+
+ Returns
+ -------
+ path : list
+ The greedy contraction order within the memory limit constraint.
+
+ Examples
+ --------
+ >>> isets = [set('abd'), set('ac'), set('bdc')]
+ >>> oset = set()
+ >>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
+    >>> _greedy_path(isets, oset, idx_sizes, 5000)
+ [(0, 2), (0, 1)]
+ """
+
+ # Handle trivial cases that leaked through
+ if len(input_sets) == 1:
+ return [(0,)]
+ elif len(input_sets) == 2:
+ return [(0, 1)]
+
+ # Build up a naive cost
+ contract = _find_contraction(range(len(input_sets)), input_sets, output_set)
+ idx_result, new_input_sets, idx_removed, idx_contract = contract
+ naive_cost = _flop_count(idx_contract, idx_removed, len(input_sets), idx_dict)
+
+ # Initially iterate over all pairs
+ comb_iter = itertools.combinations(range(len(input_sets)), 2)
+ known_contractions = []
+
+ path_cost = 0
+ path = []
+
+ for iteration in range(len(input_sets) - 1):
+
+ # Iterate over all pairs on first step, only previously found pairs on subsequent steps
+ for positions in comb_iter:
+
+ # Always initially ignore outer products
+ if input_sets[positions[0]].isdisjoint(input_sets[positions[1]]):
+ continue
+
+ result = _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit, path_cost,
+ naive_cost)
+ if result is not None:
+ known_contractions.append(result)
+
+        # If we do not have an inner contraction, rescan pairs including outer products
+ if len(known_contractions) == 0:
+
+ # Then check the outer products
+ for positions in itertools.combinations(range(len(input_sets)), 2):
+ result = _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit,
+ path_cost, naive_cost)
+ if result is not None:
+ known_contractions.append(result)
+
+ # If we still did not find any remaining contractions, default back to einsum like behavior
+ if len(known_contractions) == 0:
+ path.append(tuple(range(len(input_sets))))
+ break
+
+ # Sort based on first index
+ best = min(known_contractions, key=lambda x: x[0])
+
+ # Now propagate as many unused contractions as possible to next iteration
+ known_contractions = _update_other_results(known_contractions, best)
+
+ # Next iteration only compute contractions with the new tensor
+ # All other contractions have been accounted for
+ input_sets = best[2]
+ new_tensor_pos = len(input_sets) - 1
+ comb_iter = ((i, new_tensor_pos) for i in range(new_tensor_pos))
+
+ # Update path and total cost
+ path.append(best[1])
+ path_cost += best[0][1]
+
+ return path
+
+
+def _can_dot(inputs, result, idx_removed):
+ """
+    Checks if we can use a BLAS (np.tensordot) call and whether it is beneficial to do so.
+
+ Parameters
+ ----------
+ inputs : list of str
+ Specifies the subscripts for summation.
+ result : str
+ Resulting summation.
+ idx_removed : set
+ Indices that are removed in the summation
+
+
+ Returns
+ -------
+ type : bool
+ Returns true if BLAS should and can be used, else False
+
+ Notes
+ -----
+    If the operation is BLAS level 1 or 2 and the data is not already aligned,
+    we default back to einsum, as the memory movement needed to copy is more
+    costly than the operation itself.
+
+
+ Examples
+ --------
+
+ # Standard GEMM operation
+ >>> _can_dot(['ij', 'jk'], 'ik', set('j'))
+ True
+
+ # Can use the standard BLAS, but requires odd data movement
+ >>> _can_dot(['ijj', 'jk'], 'ik', set('j'))
+ False
+
+ # DDOT where the memory is not aligned
+ >>> _can_dot(['ijk', 'ikj'], '', set('ijk'))
+ False
+
+ """
+
+ # All `dot` calls remove indices
+ if len(idx_removed) == 0:
+ return False
+
+ # BLAS can only handle two operands
+ if len(inputs) != 2:
+ return False
+
+ input_left, input_right = inputs
+
+ for c in set(input_left + input_right):
+ # can't deal with repeated indices on same input or more than 2 total
+ nl, nr = input_left.count(c), input_right.count(c)
+ if (nl > 1) or (nr > 1) or (nl + nr > 2):
+ return False
+
+ # can't do implicit summation or dimension collapse e.g.
+ # "ab,bc->c" (implicitly sum over 'a')
+ # "ab,ca->ca" (take diagonal of 'a')
+ if nl + nr - 1 == int(c in result):
+ return False
+
+ # Build a few temporaries
+ set_left = set(input_left)
+ set_right = set(input_right)
+ keep_left = set_left - idx_removed
+ keep_right = set_right - idx_removed
+ rs = len(idx_removed)
+
+ # At this point we are a DOT, GEMV, or GEMM operation
+
+ # Handle inner products
+
+ # DDOT with aligned data
+ if input_left == input_right:
+ return True
+
+ # DDOT without aligned data (better to use einsum)
+ if set_left == set_right:
+ return False
+
+ # Handle the 4 possible (aligned) GEMV or GEMM cases
+
+ # GEMM or GEMV no transpose
+ if input_left[-rs:] == input_right[:rs]:
+ return True
+
+ # GEMM or GEMV transpose both
+ if input_left[:rs] == input_right[-rs:]:
+ return True
+
+ # GEMM or GEMV transpose right
+ if input_left[-rs:] == input_right[-rs:]:
+ return True
+
+ # GEMM or GEMV transpose left
+ if input_left[:rs] == input_right[:rs]:
+ return True
+
+ # Einsum is faster than GEMV if we have to copy data
+ if not keep_left or not keep_right:
+ return False
+
+ # We are a matrix-matrix product, but we need to copy data
+ return True
+
+
+def _parse_einsum_input(operands):
+ """
+    A reproduction of the C-side einsum parsing in Python.
+
+ Returns
+ -------
+ input_strings : str
+ Parsed input strings
+ output_string : str
+ Parsed output string
+ operands : list of array_like
+ The operands to use in the numpy contraction
+
+ Examples
+ --------
+ The operand list is simplified to reduce printing:
+
+ >>> a = np.random.rand(4, 4)
+ >>> b = np.random.rand(4, 4, 4)
+    >>> _parse_einsum_input(('...a,...a->...', a, b))
+ ('za,xza', 'xz', [a, b])
+
+    >>> _parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0]))
+ ('za,xza', 'xz', [a, b])
+ """
+
+ if len(operands) == 0:
+ raise ValueError("No input operands")
+
+ if isinstance(operands[0], basestring):
+ subscripts = operands[0].replace(" ", "")
+ operands = [asanyarray(v) for v in operands[1:]]
+
+ # Ensure all characters are valid
+ for s in subscripts:
+ if s in '.,->':
+ continue
+ if s not in einsum_symbols:
+ raise ValueError("Character %s is not a valid symbol." % s)
+
+ else:
+ tmp_operands = list(operands)
+ operand_list = []
+ subscript_list = []
+ for p in range(len(operands) // 2):
+ operand_list.append(tmp_operands.pop(0))
+ subscript_list.append(tmp_operands.pop(0))
+
+ output_list = tmp_operands[-1] if len(tmp_operands) else None
+ operands = [asanyarray(v) for v in operand_list]
+ subscripts = ""
+ last = len(subscript_list) - 1
+ for num, sub in enumerate(subscript_list):
+ for s in sub:
+ if s is Ellipsis:
+ subscripts += "..."
+ elif isinstance(s, int):
+ subscripts += einsum_symbols[s]
+ else:
+ raise TypeError("For this input type lists must contain "
+ "either int or Ellipsis")
+ if num != last:
+ subscripts += ","
+
+ if output_list is not None:
+ subscripts += "->"
+ for s in output_list:
+ if s is Ellipsis:
+ subscripts += "..."
+ elif isinstance(s, int):
+ subscripts += einsum_symbols[s]
+ else:
+ raise TypeError("For this input type lists must contain "
+ "either int or Ellipsis")
+ # Check for proper "->"
+ if ("-" in subscripts) or (">" in subscripts):
+ invalid = (subscripts.count("-") > 1) or (subscripts.count(">") > 1)
+ if invalid or (subscripts.count("->") != 1):
+ raise ValueError("Subscripts can only contain one '->'.")
+
+ # Parse ellipses
+ if "." in subscripts:
+ used = subscripts.replace(".", "").replace(",", "").replace("->", "")
+ unused = list(einsum_symbols_set - set(used))
+ ellipse_inds = "".join(unused)
+ longest = 0
+
+ if "->" in subscripts:
+ input_tmp, output_sub = subscripts.split("->")
+ split_subscripts = input_tmp.split(",")
+ out_sub = True
+ else:
+ split_subscripts = subscripts.split(',')
+ out_sub = False
+
+ for num, sub in enumerate(split_subscripts):
+ if "." in sub:
+ if (sub.count(".") != 3) or (sub.count("...") != 1):
+ raise ValueError("Invalid Ellipses.")
+
+ # Take into account numerical values
+ if operands[num].shape == ():
+ ellipse_count = 0
+ else:
+ ellipse_count = max(operands[num].ndim, 1)
+ ellipse_count -= (len(sub) - 3)
+
+ if ellipse_count > longest:
+ longest = ellipse_count
+
+ if ellipse_count < 0:
+ raise ValueError("Ellipses lengths do not match.")
+ elif ellipse_count == 0:
+ split_subscripts[num] = sub.replace('...', '')
+ else:
+ rep_inds = ellipse_inds[-ellipse_count:]
+ split_subscripts[num] = sub.replace('...', rep_inds)
+
+ subscripts = ",".join(split_subscripts)
+ if longest == 0:
+ out_ellipse = ""
+ else:
+ out_ellipse = ellipse_inds[-longest:]
+
+ if out_sub:
+ subscripts += "->" + output_sub.replace("...", out_ellipse)
+ else:
+ # Special care for outputless ellipses
+ output_subscript = ""
+ tmp_subscripts = subscripts.replace(",", "")
+ for s in sorted(set(tmp_subscripts)):
+ if s not in (einsum_symbols):
+ raise ValueError("Character %s is not a valid symbol." % s)
+ if tmp_subscripts.count(s) == 1:
+ output_subscript += s
+ normal_inds = ''.join(sorted(set(output_subscript) -
+ set(out_ellipse)))
+
+ subscripts += "->" + out_ellipse + normal_inds
+
+ # Build the output string if it does not exist
+ if "->" in subscripts:
+ input_subscripts, output_subscript = subscripts.split("->")
+ else:
+ input_subscripts = subscripts
+ # Build output subscripts
+ tmp_subscripts = subscripts.replace(",", "")
+ output_subscript = ""
+ for s in sorted(set(tmp_subscripts)):
+ if s not in einsum_symbols:
+ raise ValueError("Character %s is not a valid symbol." % s)
+ if tmp_subscripts.count(s) == 1:
+ output_subscript += s
+
+ # Make sure output subscripts are in the input
+ for char in output_subscript:
+ if char not in input_subscripts:
+ raise ValueError("Output character %s did not appear in the input"
+ % char)
+
+ # Make sure the number of operands matches the number of terms
+ if len(input_subscripts.split(',')) != len(operands):
+ raise ValueError("Number of einsum subscripts must be equal to the "
+ "number of operands.")
+
+ return (input_subscripts, output_subscript, operands)
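+
+# Illustrative trace (not NumPy source): with a.ndim == 3 and b.ndim == 2,
+# _parse_einsum_input(('...i,...i', a, b)) replaces each ellipsis with unused
+# symbols and builds the implicit output, yielding something like
+# ('XYi,Yi', 'XY', [a, b]) up to the particular letters chosen.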
+
+
+def _einsum_path_dispatcher(*operands, **kwargs):
+ # NOTE: technically, we should only dispatch on array-like arguments, not
+ # subscripts (given as strings). But separating operands into
+ # arrays/subscripts is a little tricky/slow (given einsum's two supported
+ # signatures), so as a practical shortcut we dispatch on everything.
+ # Strings will be ignored for dispatching since they don't define
+ # __array_function__.
+ return operands
+
+
+@array_function_dispatch(_einsum_path_dispatcher, module='numpy')
+def einsum_path(*operands, **kwargs):
+ """
+ einsum_path(subscripts, *operands, optimize='greedy')
+
+ Evaluates the lowest cost contraction order for an einsum expression by
+ considering the creation of intermediate arrays.
+
+ Parameters
+ ----------
+ subscripts : str
+ Specifies the subscripts for summation.
+ *operands : list of array_like
+ These are the arrays for the operation.
+ optimize : {bool, list, tuple, 'greedy', 'optimal'}
+ Choose the type of path. If a tuple is provided, the second argument is
+ assumed to be the maximum intermediate size created. If only a single
+ argument is provided the largest input or output array size is used
+ as a maximum intermediate size.
+
+ * if a list is given that starts with ``einsum_path``, uses this as the
+ contraction path
+ * if False no optimization is taken
+ * if True defaults to the 'greedy' algorithm
+ * 'optimal' An algorithm that combinatorially explores all possible
+ ways of contracting the listed tensors and chooses the least costly
+ path. Scales exponentially with the number of terms in the
+ contraction.
+ * 'greedy' An algorithm that chooses the best pair contraction
+ at each step. Effectively, this algorithm searches the largest inner,
+ Hadamard, and then outer products at each step. Scales cubically with
+ the number of terms in the contraction. Equivalent to the 'optimal'
+ path for most contractions.
+
+ Default is 'greedy'.
+
+ Returns
+ -------
+ path : list of tuples
+ A list representation of the einsum path.
+ string_repr : str
+ A printable representation of the einsum path.
+
+ Notes
+ -----
+ The resulting path indicates which terms of the input contraction should be
+ contracted first; the result of this contraction is then appended to the
+ end of the contraction list. This list can then be iterated over until all
+ intermediate contractions are complete.
+
+ See Also
+ --------
+ einsum, linalg.multi_dot
+
+ Examples
+ --------
+
+ We can begin with a chain dot example. In this case, it is optimal to
+ contract the ``b`` and ``c`` tensors first as represented by the first
+ element of the path ``(1, 2)``. The resulting tensor is added to the end
+ of the contraction and the remaining contraction ``(0, 1)`` is then
+ completed.
+
+ >>> a = np.random.rand(2, 2)
+ >>> b = np.random.rand(2, 5)
+ >>> c = np.random.rand(5, 2)
+ >>> path_info = np.einsum_path('ij,jk,kl->il', a, b, c, optimize='greedy')
+ >>> print(path_info[0])
+ ['einsum_path', (1, 2), (0, 1)]
+ >>> print(path_info[1])
+ Complete contraction: ij,jk,kl->il
+ Naive scaling: 4
+ Optimized scaling: 3
+ Naive FLOP count: 1.600e+02
+ Optimized FLOP count: 5.600e+01
+ Theoretical speedup: 2.857
+ Largest intermediate: 4.000e+00 elements
+ -------------------------------------------------------------------------
+ scaling current remaining
+ -------------------------------------------------------------------------
+ 3 kl,jk->jl ij,jl->il
+ 3 jl,ij->il il->il
+
+
+ A more complex index transformation example.
+
+ >>> I = np.random.rand(10, 10, 10, 10)
+ >>> C = np.random.rand(10, 10)
+ >>> path_info = np.einsum_path('ea,fb,abcd,gc,hd->efgh', C, C, I, C, C,
+ ... optimize='greedy')
+
+ >>> print(path_info[0])
+ ['einsum_path', (0, 2), (0, 3), (0, 2), (0, 1)]
+ >>> print(path_info[1])
+ Complete contraction: ea,fb,abcd,gc,hd->efgh
+ Naive scaling: 8
+ Optimized scaling: 5
+ Naive FLOP count: 8.000e+08
+ Optimized FLOP count: 8.000e+05
+ Theoretical speedup: 1000.000
+ Largest intermediate: 1.000e+04 elements
+ --------------------------------------------------------------------------
+ scaling current remaining
+ --------------------------------------------------------------------------
+ 5 abcd,ea->bcde fb,gc,hd,bcde->efgh
+ 5 bcde,fb->cdef gc,hd,cdef->efgh
+ 5 cdef,gc->defg hd,defg->efgh
+ 5 defg,hd->efgh efgh->efgh
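+
+ An explicit path computed once can be fed back in, and a tuple form caps
+ the size of intermediates (the 1e6-element limit below is purely
+ illustrative):
+
+ >>> path = path_info[0]
+ >>> path_info = np.einsum_path('ea,fb,abcd,gc,hd->efgh', C, C, I, C, C,
+ ... optimize=path) # doctest: +SKIP
+ >>> path_info = np.einsum_path('ea,fb,abcd,gc,hd->efgh', C, C, I, C, C,
+ ... optimize=('greedy', 1e6)) # doctest: +SKIP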
+ """
+
+ # Make sure all keywords are valid
+ valid_contract_kwargs = ['optimize', 'einsum_call']
+ unknown_kwargs = [k for (k, v) in kwargs.items() if k
+ not in valid_contract_kwargs]
+ if len(unknown_kwargs):
+ raise TypeError("Did not understand the following kwargs:"
+ " %s" % unknown_kwargs)
+
+ # Figure out what the path really is
+ path_type = kwargs.pop('optimize', True)
+ if path_type is True:
+ path_type = 'greedy'
+ if path_type is None:
+ path_type = False
+
+ memory_limit = None
+
+ # No optimization or a named path algorithm
+ if (path_type is False) or isinstance(path_type, basestring):
+ pass
+
+ # Given an explicit path
+ elif len(path_type) and (path_type[0] == 'einsum_path'):
+ pass
+
+ # Path tuple with memory limit
+ elif ((len(path_type) == 2) and isinstance(path_type[0], basestring) and
+ isinstance(path_type[1], (int, float))):
+ memory_limit = int(path_type[1])
+ path_type = path_type[0]
+
+ else:
+ raise TypeError("Did not understand the path: %s" % str(path_type))
+
+ # Hidden option, only einsum should call this
+ einsum_call_arg = kwargs.pop("einsum_call", False)
+
+ # Python side parsing
+ input_subscripts, output_subscript, operands = _parse_einsum_input(operands)
+
+ # Build a few useful list and sets
+ input_list = input_subscripts.split(',')
+ input_sets = [set(x) for x in input_list]
+ output_set = set(output_subscript)
+ indices = set(input_subscripts.replace(',', ''))
+
+ # Get length of each unique dimension and ensure all dimensions are correct
+ dimension_dict = {}
+ broadcast_indices = [[] for x in range(len(input_list))]
+ for tnum, term in enumerate(input_list):
+ sh = operands[tnum].shape
+ if len(sh) != len(term):
+ raise ValueError("Einstein sum subscript %s does not contain the "
+ "correct number of indices for operand %d."
+ % (input_list[tnum], tnum))
+ for cnum, char in enumerate(term):
+ dim = sh[cnum]
+
+ # Build out broadcast indices
+ if dim == 1:
+ broadcast_indices[tnum].append(char)
+
+ if char in dimension_dict.keys():
+ # For broadcasting cases we always want the largest dim size
+ if dimension_dict[char] == 1:
+ dimension_dict[char] = dim
+ elif dim not in (1, dimension_dict[char]):
+ raise ValueError("Size of label '%s' for operand %d (%d) "
+ "does not match previous terms (%d)."
+ % (char, tnum, dimension_dict[char], dim))
+ else:
+ dimension_dict[char] = dim
+
+ # Convert broadcast inds to sets
+ broadcast_indices = [set(x) for x in broadcast_indices]
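+ # e.g. for "ij,jk" with shapes (1, 5) and (5, 4) this leaves
+ # dimension_dict == {'i': 1, 'j': 5, 'k': 4} and
+ # broadcast_indices == [{'i'}, set()] (illustrative trace, not NumPy source)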
+
+ # Compute size of each input array plus the output array
+ size_list = [_compute_size_by_dict(term, dimension_dict)
+ for term in input_list + [output_subscript]]
+ max_size = max(size_list)
+
+ if memory_limit is None:
+ memory_arg = max_size
+ else:
+ memory_arg = memory_limit
+
+ # Compute naive cost
+ # This isn't quite right, need to look into exactly how einsum does this
+ inner_product = (sum(len(x) for x in input_sets) - len(indices)) > 0
+ naive_cost = _flop_count(indices, inner_product, len(input_list), dimension_dict)
+
+ # Compute the path
+ if (path_type is False) or (len(input_list) in [1, 2]) or (indices == output_set):
+ # Nothing to be optimized, leave it to einsum
+ path = [tuple(range(len(input_list)))]
+ elif path_type == "greedy":
+ path = _greedy_path(input_sets, output_set, dimension_dict, memory_arg)
+ elif path_type == "optimal":
+ path = _optimal_path(input_sets, output_set, dimension_dict, memory_arg)
+ elif path_type[0] == 'einsum_path':
+ path = path_type[1:]
+ else:
+ raise KeyError("Path name %s not found", path_type)
+
+ cost_list, scale_list, size_list, contraction_list = [], [], [], []
+
+ # Build contraction tuple (positions, gemm, einsum_str, remaining)
+ for cnum, contract_inds in enumerate(path):
+ # Make sure we remove inds from right to left
+ contract_inds = tuple(sorted(list(contract_inds), reverse=True))
+
+ contract = _find_contraction(contract_inds, input_sets, output_set)
+ out_inds, input_sets, idx_removed, idx_contract = contract
+
+ cost = _flop_count(idx_contract, idx_removed, len(contract_inds), dimension_dict)
+ cost_list.append(cost)
+ scale_list.append(len(idx_contract))
+ size_list.append(_compute_size_by_dict(out_inds, dimension_dict))
+
+ bcast = set()
+ tmp_inputs = []
+ for x in contract_inds:
+ tmp_inputs.append(input_list.pop(x))
+ bcast |= broadcast_indices.pop(x)
+
+ new_bcast_inds = bcast - idx_removed
+
+ # If we're broadcasting, nix blas
+ if not len(idx_removed & bcast):
+ do_blas = _can_dot(tmp_inputs, out_inds, idx_removed)
+ else:
+ do_blas = False
+
+ # Last contraction
+ if (cnum - len(path)) == -1:
+ idx_result = output_subscript
+ else:
+ sort_result = [(dimension_dict[ind], ind) for ind in out_inds]
+ idx_result = "".join([x[1] for x in sorted(sort_result)])
+
+ input_list.append(idx_result)
+ broadcast_indices.append(new_bcast_inds)
+ einsum_str = ",".join(tmp_inputs) + "->" + idx_result
+
+ contraction = (contract_inds, idx_removed, einsum_str, input_list[:], do_blas)
+ contraction_list.append(contraction)
+
+ opt_cost = sum(cost_list) + 1
+
+ if einsum_call_arg:
+ return (operands, contraction_list)
+
+ # Return the path along with a nice string representation
+ overall_contraction = input_subscripts + "->" + output_subscript
+ header = ("scaling", "current", "remaining")
+
+ speedup = naive_cost / opt_cost
+ max_i = max(size_list)
+
+ path_print = " Complete contraction: %s\n" % overall_contraction
+ path_print += " Naive scaling: %d\n" % len(indices)
+ path_print += " Optimized scaling: %d\n" % max(scale_list)
+ path_print += " Naive FLOP count: %.3e\n" % naive_cost
+ path_print += " Optimized FLOP count: %.3e\n" % opt_cost
+ path_print += " Theoretical speedup: %3.3f\n" % speedup
+ path_print += " Largest intermediate: %.3e elements\n" % max_i
+ path_print += "-" * 74 + "\n"
+ path_print += "%6s %24s %40s\n" % header
+ path_print += "-" * 74
+
+ for n, contraction in enumerate(contraction_list):
+ inds, idx_rm, einsum_str, remaining, blas = contraction
+ remaining_str = ",".join(remaining) + "->" + output_subscript
+ path_run = (scale_list[n], einsum_str, remaining_str)
+ path_print += "\n%4d %24s %40s" % path_run
+
+ path = ['einsum_path'] + path
+ return (path, path_print)
+
+
+def _einsum_dispatcher(*operands, **kwargs):
+ # Arguably we dispatch on more arguments than we really should; see note in
+ # _einsum_path_dispatcher for why.
+ for op in operands:
+ yield op
+ yield kwargs.get('out')
+
+
+# Rewrite einsum to handle different cases
+@array_function_dispatch(_einsum_dispatcher, module='numpy')
+def einsum(*operands, **kwargs):
+ """
+ einsum(subscripts, *operands, out=None, dtype=None, order='K',
+ casting='safe', optimize=False)
+
+ Evaluates the Einstein summation convention on the operands.
+
+ Using the Einstein summation convention, many common multi-dimensional,
+ linear algebraic array operations can be represented in a simple fashion.
+ In *implicit* mode `einsum` computes these values.
+
+ In *explicit* mode, `einsum` provides further flexibility to compute
+ other array operations that might not be considered classical Einstein
+ summation operations, by disabling, or forcing summation over specified
+ subscript labels.
+
+ See the notes and examples for clarification.
+
+ Parameters
+ ----------
+ subscripts : str
+ Specifies the subscripts for summation as comma separated list of
+ subscript labels. An implicit (classical Einstein summation)
+ calculation is performed unless the explicit indicator '->' is
+ included as well as subscript labels of the precise output form.
+ operands : list of array_like
+ These are the arrays for the operation.
+ out : ndarray, optional
+ If provided, the calculation is done into this array.
+ dtype : {data-type, None}, optional
+ If provided, forces the calculation to use the data type specified.
+ Note that you may have to also give a more liberal `casting`
+ parameter to allow the conversions. Default is None.
+ order : {'C', 'F', 'A', 'K'}, optional
+ Controls the memory layout of the output. 'C' means it should
+ be C contiguous. 'F' means it should be Fortran contiguous,
+ 'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
+ 'K' means it should be as close to the layout of the inputs as
+ is possible, including arbitrarily permuted axes.
+ Default is 'K'.
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+ Controls what kind of data casting may occur. Setting this to
+ 'unsafe' is not recommended, as it can adversely affect accumulations.
+
+ * 'no' means the data types should not be cast at all.
+ * 'equiv' means only byte-order changes are allowed.
+ * 'safe' means only casts which can preserve values are allowed.
+ * 'same_kind' means only safe casts or casts within a kind,
+ like float64 to float32, are allowed.
+ * 'unsafe' means any data conversions may be done.
+
+ Default is 'safe'.
+ optimize : {False, True, 'greedy', 'optimal'}, optional
+ Controls if intermediate optimization should occur. No optimization
+ will occur if False and True will default to the 'greedy' algorithm.
+ Also accepts an explicit contraction list from the ``np.einsum_path``
+ function. See ``np.einsum_path`` for more details. Defaults to False.
+
+ Returns
+ -------
+ output : ndarray
+ The calculation based on the Einstein summation convention.
+
+ See Also
+ --------
+ einsum_path, dot, inner, outer, tensordot, linalg.multi_dot
+
+ Notes
+ -----
+ .. versionadded:: 1.6.0
+
+ The Einstein summation convention can be used to compute
+ many multi-dimensional, linear algebraic array operations. `einsum`
+ provides a succinct way of representing these.
+
+ A non-exhaustive list of these operations,
+ which can be computed by `einsum`, is shown below along with examples:
+
+ * Trace of an array, :py:func:`numpy.trace`.
+ * Return a diagonal, :py:func:`numpy.diag`.
+ * Array axis summations, :py:func:`numpy.sum`.
+ * Transpositions and permutations, :py:func:`numpy.transpose`.
+ * Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`.
+ * Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`.
+ * Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`.
+ * Tensor contractions, :py:func:`numpy.tensordot`.
+ * Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`.
+
+ The subscripts string is a comma-separated list of subscript labels,
+ where each label refers to a dimension of the corresponding operand.
+ Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``
+ is equivalent to :py:func:`np.inner(a,b) <numpy.inner>`. If a label
+ appears only once, it is not summed, so ``np.einsum('i', a)`` produces a
+ view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)``
+ describes traditional matrix multiplication and is equivalent to
+ :py:func:`np.matmul(a,b) <numpy.matmul>`. Repeated subscript labels in one
+ operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent
+ to :py:func:`np.trace(a) <numpy.trace>`.
+
+ In *implicit mode*, the chosen subscripts are important
+ since the axes of the output are reordered alphabetically. This
+ means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
+ ``np.einsum('ji', a)`` takes its transpose. Additionally,
+ ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while,
+ ``np.einsum('ij,jh', a, b)`` returns the transpose of the
+ multiplication since subscript 'h' precedes subscript 'i'.
+
+ In *explicit mode* the output can be directly controlled by
+ specifying output subscript labels. This requires the
+ identifier '->' as well as the list of output subscript labels.
+ This feature increases the flexibility of the function since
+ summing can be disabled or forced when required. The call
+ ``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <numpy.sum>`,
+ and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <numpy.diag>`.
+ The difference is that `einsum` does not allow broadcasting by default.
+ Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the
+ order of the output subscript labels and therefore returns matrix
+ multiplication, unlike the example above in implicit mode.
+
+ To enable and control broadcasting, use an ellipsis. Default
+ NumPy-style broadcasting is done by adding an ellipsis
+ to the left of each term, like ``np.einsum('...ii->...i', a)``.
+ To take the trace along the first and last axes,
+ you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
+ product with the left-most indices instead of rightmost, one can do
+ ``np.einsum('ij...,jk...->ik...', a, b)``.
+
+ When there is only one operand, no axes are summed, and no output
+ parameter is provided, a view into the operand is returned instead
+ of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
+ produces a view (changed in version 1.10.0).
+
+ `einsum` also provides an alternative way to provide the subscripts
+ and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``.
+ If the output shape is not provided in this format `einsum` will be
+ calculated in implicit mode, otherwise it will be performed explicitly.
+ The examples below have corresponding `einsum` calls with the two
+ parameter methods.
+
+ .. versionadded:: 1.10.0
+
+ Views returned from einsum are now writeable whenever the input array
+ is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now
+ have the same effect as :py:func:`np.swapaxes(a, 0, 2) <numpy.swapaxes>`
+ and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
+ of a 2D array.
+
+ .. versionadded:: 1.12.0
+
+ Added the ``optimize`` argument which will optimize the contraction order
+ of an einsum expression. For a contraction with three or more operands this
+ can greatly increase the computational efficiency at the cost of a larger
+ memory footprint during computation.
+
+ Typically a 'greedy' algorithm is applied which empirical tests have shown
+ returns the optimal path in the majority of cases. In some cases 'optimal'
+ will return the superlative path through a more expensive, exhaustive search.
+ For iterative calculations it may be advisable to calculate the optimal path
+ once and reuse that path by supplying it as an argument. An example is given
+ below.
+
+ See :py:func:`numpy.einsum_path` for more details.
+
+ Examples
+ --------
+ >>> a = np.arange(25).reshape(5,5)
+ >>> b = np.arange(5)
+ >>> c = np.arange(6).reshape(2,3)
+
+ Trace of a matrix:
+
+ >>> np.einsum('ii', a)
+ 60
+ >>> np.einsum(a, [0,0])
+ 60
+ >>> np.trace(a)
+ 60
+
+ Extract the diagonal (requires explicit form):
+
+ >>> np.einsum('ii->i', a)
+ array([ 0, 6, 12, 18, 24])
+ >>> np.einsum(a, [0,0], [0])
+ array([ 0, 6, 12, 18, 24])
+ >>> np.diag(a)
+ array([ 0, 6, 12, 18, 24])
+
+ Sum over an axis (requires explicit form):
+
+ >>> np.einsum('ij->i', a)
+ array([ 10, 35, 60, 85, 110])
+ >>> np.einsum(a, [0,1], [0])
+ array([ 10, 35, 60, 85, 110])
+ >>> np.sum(a, axis=1)
+ array([ 10, 35, 60, 85, 110])
+
+ For higher dimensional arrays summing a single axis can be done with ellipsis:
+
+ >>> np.einsum('...j->...', a)
+ array([ 10, 35, 60, 85, 110])
+ >>> np.einsum(a, [Ellipsis,1], [Ellipsis])
+ array([ 10, 35, 60, 85, 110])
+
+ Compute a matrix transpose, or reorder any number of axes:
+
+ >>> np.einsum('ji', c)
+ array([[0, 3],
+ [1, 4],
+ [2, 5]])
+ >>> np.einsum('ij->ji', c)
+ array([[0, 3],
+ [1, 4],
+ [2, 5]])
+ >>> np.einsum(c, [1,0])
+ array([[0, 3],
+ [1, 4],
+ [2, 5]])
+ >>> np.transpose(c)
+ array([[0, 3],
+ [1, 4],
+ [2, 5]])
+
+ Vector inner products:
+
+ >>> np.einsum('i,i', b, b)
+ 30
+ >>> np.einsum(b, [0], b, [0])
+ 30
+ >>> np.inner(b,b)
+ 30
+
+ Matrix vector multiplication:
+
+ >>> np.einsum('ij,j', a, b)
+ array([ 30, 80, 130, 180, 230])
+ >>> np.einsum(a, [0,1], b, [1])
+ array([ 30, 80, 130, 180, 230])
+ >>> np.dot(a, b)
+ array([ 30, 80, 130, 180, 230])
+ >>> np.einsum('...j,j', a, b)
+ array([ 30, 80, 130, 180, 230])
+
+ Broadcasting and scalar multiplication:
+
+ >>> np.einsum('..., ...', 3, c)
+ array([[ 0, 3, 6],
+ [ 9, 12, 15]])
+ >>> np.einsum(',ij', 3, c)
+ array([[ 0, 3, 6],
+ [ 9, 12, 15]])
+ >>> np.einsum(3, [Ellipsis], c, [Ellipsis])
+ array([[ 0, 3, 6],
+ [ 9, 12, 15]])
+ >>> np.multiply(3, c)
+ array([[ 0, 3, 6],
+ [ 9, 12, 15]])
+
+ Vector outer product:
+
+ >>> np.einsum('i,j', np.arange(2)+1, b)
+ array([[0, 1, 2, 3, 4],
+ [0, 2, 4, 6, 8]])
+ >>> np.einsum(np.arange(2)+1, [0], b, [1])
+ array([[0, 1, 2, 3, 4],
+ [0, 2, 4, 6, 8]])
+ >>> np.outer(np.arange(2)+1, b)
+ array([[0, 1, 2, 3, 4],
+ [0, 2, 4, 6, 8]])
+
+ Tensor contraction:
+
+ >>> a = np.arange(60.).reshape(3,4,5)
+ >>> b = np.arange(24.).reshape(4,3,2)
+ >>> np.einsum('ijk,jil->kl', a, b)
+ array([[ 4400., 4730.],
+ [ 4532., 4874.],
+ [ 4664., 5018.],
+ [ 4796., 5162.],
+ [ 4928., 5306.]])
+ >>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3])
+ array([[ 4400., 4730.],
+ [ 4532., 4874.],
+ [ 4664., 5018.],
+ [ 4796., 5162.],
+ [ 4928., 5306.]])
+ >>> np.tensordot(a,b, axes=([1,0],[0,1]))
+ array([[ 4400., 4730.],
+ [ 4532., 4874.],
+ [ 4664., 5018.],
+ [ 4796., 5162.],
+ [ 4928., 5306.]])
+
+ Writeable returned arrays (since version 1.10.0):
+
+ >>> a = np.zeros((3, 3))
+ >>> np.einsum('ii->i', a)[:] = 1
+ >>> a
+ array([[ 1., 0., 0.],
+ [ 0., 1., 0.],
+ [ 0., 0., 1.]])
+
+ Example of ellipsis use:
+
+ >>> a = np.arange(6).reshape((3,2))
+ >>> b = np.arange(12).reshape((4,3))
+ >>> np.einsum('ki,jk->ij', a, b)
+ array([[10, 28, 46, 64],
+ [13, 40, 67, 94]])
+ >>> np.einsum('ki,...k->i...', a, b)
+ array([[10, 28, 46, 64],
+ [13, 40, 67, 94]])
+ >>> np.einsum('k...,jk', a, b)
+ array([[10, 28, 46, 64],
+ [13, 40, 67, 94]])
+
+ Chained array operations. For more complicated contractions, speed ups
+ might be achieved by repeatedly computing a 'greedy' path or pre-computing the
+ 'optimal' path and repeatedly applying it, using an
+ `einsum_path` insertion (since version 1.12.0). Performance improvements can be
+ particularly significant with larger arrays:
+
+ >>> a = np.ones(64).reshape(2,4,8)
+ # Basic `einsum`: ~1520ms (benchmarked on 3.1GHz Intel i5.)
+ >>> for iteration in range(500):
+ ... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a)
+ # Sub-optimal `einsum` (due to repeated path calculation time): ~330ms
+ >>> for iteration in range(500):
+ ... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal')
+ # Greedy `einsum` (faster optimal path approximation): ~160ms
+ >>> for iteration in range(500):
+ ... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='greedy')
+ # Optimal `einsum` (best usage pattern in some use cases): ~110ms
+ >>> path = np.einsum_path('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal')[0]
+ >>> for iteration in range(500):
+ ... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=path)
+
+ """
+
+ # Grab non-einsum kwargs; do not optimize by default.
+ optimize_arg = kwargs.pop('optimize', False)
+
+ # If no optimization, run pure einsum
+ if optimize_arg is False:
+ return c_einsum(*operands, **kwargs)
+
+ valid_einsum_kwargs = ['out', 'dtype', 'order', 'casting']
+ einsum_kwargs = {k: v for (k, v) in kwargs.items() if
+ k in valid_einsum_kwargs}
+
+ # Make sure all keywords are valid
+ valid_contract_kwargs = ['optimize'] + valid_einsum_kwargs
+ unknown_kwargs = [k for (k, v) in kwargs.items() if
+ k not in valid_contract_kwargs]
+
+ if len(unknown_kwargs):
+ raise TypeError("Did not understand the following kwargs: %s"
+ % unknown_kwargs)
+
+ # Special handling if out is specified
+ specified_out = False
+ out_array = einsum_kwargs.pop('out', None)
+ if out_array is not None:
+ specified_out = True
+
+ # Build the contraction list and operand
+ operands, contraction_list = einsum_path(*operands, optimize=optimize_arg,
+ einsum_call=True)
+
+ handle_out = False
+
+ # Start contraction loop
+ for num, contraction in enumerate(contraction_list):
+ inds, idx_rm, einsum_str, remaining, blas = contraction
+ tmp_operands = [operands.pop(x) for x in inds]
+
+ # Do we need to deal with the output?
+ handle_out = specified_out and ((num + 1) == len(contraction_list))
+
+ # Call tensordot if still possible
+ if blas:
+ # Checks have already been handled
+ input_str, results_index = einsum_str.split('->')
+ input_left, input_right = input_str.split(',')
+
+ tensor_result = input_left + input_right
+ for s in idx_rm:
+ tensor_result = tensor_result.replace(s, "")
+
+ # Find indices to contract over
+ left_pos, right_pos = [], []
+ for s in sorted(idx_rm):
+ left_pos.append(input_left.find(s))
+ right_pos.append(input_right.find(s))
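+ # e.g. for einsum_str "ij,jk->ik" with idx_rm == {'j'} this gives
+ # tensor_result "ik", left_pos [1] and right_pos [0]
+ # (illustrative trace, not NumPy source)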
+
+ # Contract!
+ new_view = tensordot(*tmp_operands, axes=(tuple(left_pos), tuple(right_pos)))
+
+ # Build a new view if needed
+ if (tensor_result != results_index) or handle_out:
+ if handle_out:
+ einsum_kwargs["out"] = out_array
+ new_view = c_einsum(tensor_result + '->' + results_index, new_view, **einsum_kwargs)
+
+ # Call einsum
+ else:
+ # If out was specified
+ if handle_out:
+ einsum_kwargs["out"] = out_array
+
+ # Do the contraction
+ new_view = c_einsum(einsum_str, *tmp_operands, **einsum_kwargs)
+
+ # Append new items and dereference what we can
+ operands.append(new_view)
+ del tmp_operands, new_view
+
+ if specified_out:
+ return out_array
+ else:
+ return operands[0]
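+
+# End-to-end sketch (illustrative, not NumPy source): with optimization on,
+# einsum('ij,jk,kl->il', a, b, c, optimize=True) walks the contraction list
+# built above, routing each pairwise step through tensordot when `blas` is
+# set and through c_einsum otherwise.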
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/einsumfunc.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/einsumfunc.pyc
new file mode 100644
index 0000000..8368b3f
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/einsumfunc.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/fromnumeric.py b/project/venv/lib/python2.7/site-packages/numpy/core/fromnumeric.py
new file mode 100644
index 0000000..59a820d
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/fromnumeric.py
@@ -0,0 +1,3459 @@
+"""Module containing non-deprecated functions borrowed from Numeric.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import functools
+import types
+import warnings
+
+import numpy as np
+from .. import VisibleDeprecationWarning
+from . import multiarray as mu
+from . import overrides
+from . import umath as um
+from . import numerictypes as nt
+from .numeric import asarray, array, asanyarray, concatenate
+from . import _methods
+
+_dt_ = nt.sctype2char
+
+# functions that are methods
+__all__ = [
+ 'alen', 'all', 'alltrue', 'amax', 'amin', 'any', 'argmax',
+ 'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip',
+ 'compress', 'cumprod', 'cumproduct', 'cumsum', 'diagonal', 'mean',
+ 'ndim', 'nonzero', 'partition', 'prod', 'product', 'ptp', 'put',
+ 'rank', 'ravel', 'repeat', 'reshape', 'resize', 'round_',
+ 'searchsorted', 'shape', 'size', 'sometrue', 'sort', 'squeeze',
+ 'std', 'sum', 'swapaxes', 'take', 'trace', 'transpose', 'var',
+]
+
+_gentype = types.GeneratorType
+# save away Python sum
+_sum_ = sum
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
+# functions that are now methods
+def _wrapit(obj, method, *args, **kwds):
+ try:
+ wrap = obj.__array_wrap__
+ except AttributeError:
+ wrap = None
+ result = getattr(asarray(obj), method)(*args, **kwds)
+ if wrap:
+ if not isinstance(result, mu.ndarray):
+ result = asarray(result)
+ result = wrap(result)
+ return result
+
+
+def _wrapfunc(obj, method, *args, **kwds):
+ try:
+ return getattr(obj, method)(*args, **kwds)
+
+ # An AttributeError occurs if the object does not have
+ # such a method in its class.
+
+ # A TypeError occurs if the object does have such a method
+ # in its class, but its signature is not identical to that
+ # of NumPy's. This situation has occurred in the case of
+ # a downstream library like 'pandas'.
+ except (AttributeError, TypeError):
+ return _wrapit(obj, method, *args, **kwds)
+
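+# Minimal illustration (assumption, not NumPy source): a plain list has no
+# 'take' method, so _wrapfunc falls back to _wrapit, which coerces the input
+# with asarray before calling the ndarray method:
+#
+#   _wrapfunc([4, 3, 5], 'take', [0, 2])   # -> array([4, 5])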
+
+def _wrapreduction(obj, ufunc, method, axis, dtype, out, **kwargs):
+ passkwargs = {k: v for k, v in kwargs.items()
+ if v is not np._NoValue}
+
+ if type(obj) is not mu.ndarray:
+ try:
+ reduction = getattr(obj, method)
+ except AttributeError:
+ pass
+ else:
+ # This branch is needed for reductions like any which don't
+ # support a dtype.
+ if dtype is not None:
+ return reduction(axis=axis, dtype=dtype, out=out, **passkwargs)
+ else:
+ return reduction(axis=axis, out=out, **passkwargs)
+
+ return ufunc.reduce(obj, axis, dtype, out, **passkwargs)
+
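+# Sketch of the delegation above (not NumPy source): a non-ndarray input
+# that defines the named method, e.g. an object with its own .sum, gets
+# reduction(axis=axis, out=out) plus dtype when one is given; everything
+# else goes straight to ufunc.reduce(obj, axis, dtype, out).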
+
+def _take_dispatcher(a, indices, axis=None, out=None, mode=None):
+ return (a, out)
+
+
+@array_function_dispatch(_take_dispatcher)
+def take(a, indices, axis=None, out=None, mode='raise'):
+ """
+ Take elements from an array along an axis.
+
+ When axis is not None, this function does the same thing as "fancy"
+ indexing (indexing arrays using arrays); however, it can be easier to use
+ if you need elements along a given axis. A call such as
+ ``np.take(arr, indices, axis=3)`` is equivalent to
+ ``arr[:,:,:,indices,...]``.
+
+ Explained without fancy indexing, this is equivalent to the following use
+ of `ndindex`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of
+ indices::
+
+ Ni, Nk = a.shape[:axis], a.shape[axis+1:]
+ Nj = indices.shape
+ for ii in ndindex(Ni):
+ for jj in ndindex(Nj):
+ for kk in ndindex(Nk):
+ out[ii + jj + kk] = a[ii + (indices[jj],) + kk]
+
+ Parameters
+ ----------
+ a : array_like (Ni..., M, Nk...)
+ The source array.
+ indices : array_like (Nj...)
+ The indices of the values to extract.
+
+ .. versionadded:: 1.8.0
+
+ Also allow scalars for indices.
+ axis : int, optional
+ The axis over which to select values. By default, the flattened
+ input array is used.
+ out : ndarray, optional (Ni..., Nj..., Nk...)
+ If provided, the result will be placed in this array. It should
+ be of the appropriate shape and dtype.
+ mode : {'raise', 'wrap', 'clip'}, optional
+ Specifies how out-of-bounds indices will behave.
+
+ * 'raise' -- raise an error (default)
+ * 'wrap' -- wrap around
+ * 'clip' -- clip to the range
+
+ 'clip' mode means that all indices that are too large are replaced
+ by the index that addresses the last element along that axis. Note
+ that this disables indexing with negative numbers.
+
+ Returns
+ -------
+ out : ndarray (Ni..., Nj..., Nk...)
+ The returned array has the same type as `a`.
+
+ See Also
+ --------
+ compress : Take elements using a boolean mask
+ ndarray.take : equivalent method
+ take_along_axis : Take elements by matching the array and the index arrays
+
+ Notes
+ -----
+
+ By eliminating the inner loop in the description above, and using `s_` to
+ build simple slice objects, `take` can be expressed in terms of applying
+ fancy indexing to each 1-d slice::
+
+ Ni, Nk = a.shape[:axis], a.shape[axis+1:]
+ for ii in ndindex(Ni):
+ for kk in ndindex(Nk):
+ out[ii + s_[...,] + kk] = a[ii + s_[:,] + kk][indices]
+
+ For this reason, it is equivalent to (but faster than) the following use
+ of `apply_along_axis`::
+
+ out = np.apply_along_axis(lambda a_1d: a_1d[indices], axis, a)
+
+ Examples
+ --------
+ >>> a = [4, 3, 5, 7, 6, 8]
+ >>> indices = [0, 1, 4]
+ >>> np.take(a, indices)
+ array([4, 3, 6])
+
+ In this example if `a` is an ndarray, "fancy" indexing can be used.
+
+ >>> a = np.array(a)
+ >>> a[indices]
+ array([4, 3, 6])
+
+ If `indices` is not one dimensional, the output also has these dimensions.
+
+ >>> np.take(a, [[0, 1], [2, 3]])
+ array([[4, 3],
+ [5, 7]])
+ """
+ return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode)
+
+
+def _reshape_dispatcher(a, newshape, order=None):
+ return (a,)
+
+
+# not deprecated --- copy if necessary, view otherwise
+@array_function_dispatch(_reshape_dispatcher)
+def reshape(a, newshape, order='C'):
+ """
+ Gives a new shape to an array without changing its data.
+
+ Parameters
+ ----------
+ a : array_like
+ Array to be reshaped.
+ newshape : int or tuple of ints
+ The new shape should be compatible with the original shape. If
+ an integer, then the result will be a 1-D array of that length.
+ One shape dimension can be -1. In this case, the value is
+ inferred from the length of the array and remaining dimensions.
+ order : {'C', 'F', 'A'}, optional
+ Read the elements of `a` using this index order, and place the
+ elements into the reshaped array using this index order. 'C'
+ means to read / write the elements using C-like index order,
+ with the last axis index changing fastest, back to the first
+ axis index changing slowest. 'F' means to read / write the
+ elements using Fortran-like index order, with the first index
+ changing fastest, and the last index changing slowest. Note that
+ the 'C' and 'F' options take no account of the memory layout of
+ the underlying array, and only refer to the order of indexing.
+ 'A' means to read / write the elements in Fortran-like index
+ order if `a` is Fortran *contiguous* in memory, C-like order
+ otherwise.
+
+ Returns
+ -------
+ reshaped_array : ndarray
+ This will be a new view object if possible; otherwise, it will
+ be a copy. Note there is no guarantee of the *memory layout* (C- or
+ Fortran- contiguous) of the returned array.
+
+ See Also
+ --------
+ ndarray.reshape : Equivalent method.
+
+ Notes
+ -----
+ It is not always possible to change the shape of an array without
+ copying the data. If you want an error to be raised when the data is copied,
+ you should assign the new shape to the shape attribute of the array::
+
+ >>> a = np.zeros((10, 2))
+ # A transpose makes the array non-contiguous
+ >>> b = a.T
+ # Taking a view makes it possible to modify the shape without modifying
+ # the initial object.
+ >>> c = b.view()
+ >>> c.shape = (20)
+ AttributeError: incompatible shape for a non-contiguous array
+
+ The `order` keyword gives the index ordering both for *fetching* the values
+ from `a`, and then *placing* the values into the output array.
+ For example, let's say you have an array:
+
+ >>> a = np.arange(6).reshape((3, 2))
+ >>> a
+ array([[0, 1],
+ [2, 3],
+ [4, 5]])
+
+ You can think of reshaping as first raveling the array (using the given
+ index order), then inserting the elements from the raveled array into the
+ new array using the same kind of index ordering as was used for the
+ raveling.
+
+ >>> np.reshape(a, (2, 3)) # C-like index ordering
+ array([[0, 1, 2],
+ [3, 4, 5]])
+ >>> np.reshape(np.ravel(a), (2, 3)) # equivalent to C ravel then C reshape
+ array([[0, 1, 2],
+ [3, 4, 5]])
+ >>> np.reshape(a, (2, 3), order='F') # Fortran-like index ordering
+ array([[0, 4, 3],
+ [2, 1, 5]])
+ >>> np.reshape(np.ravel(a, order='F'), (2, 3), order='F')
+ array([[0, 4, 3],
+ [2, 1, 5]])
+
+ Examples
+ --------
+ >>> a = np.array([[1,2,3], [4,5,6]])
+ >>> np.reshape(a, 6)
+ array([1, 2, 3, 4, 5, 6])
+ >>> np.reshape(a, 6, order='F')
+ array([1, 4, 2, 5, 3, 6])
+
+ >>> np.reshape(a, (3,-1)) # the unspecified value is inferred to be 2
+ array([[1, 2],
+ [3, 4],
+ [5, 6]])
+ """
+ return _wrapfunc(a, 'reshape', newshape, order=order)
+
+
+def _choose_dispatcher(a, choices, out=None, mode=None):
+ yield a
+ for c in choices:
+ yield c
+ yield out
+
+
+@array_function_dispatch(_choose_dispatcher)
+def choose(a, choices, out=None, mode='raise'):
+ """
+ Construct an array from an index array and a set of arrays to choose from.
+
+ First of all, if confused or uncertain, definitely look at the Examples -
+ in its full generality, this function is less simple than it might
+ seem from the following code description (below ndi =
+ `numpy.lib.index_tricks`):
+
+ ``np.choose(a,c) == np.array([c[a[I]][I] for I in ndi.ndindex(a.shape)])``.
+
+ But this omits some subtleties. Here is a fully general summary:
+
+ Given an "index" array (`a`) of integers and a sequence of `n` arrays
+ (`choices`), `a` and each choice array are first broadcast, as necessary,
+ to arrays of a common shape; calling these *Ba* and *Bchoices[i], i =
+ 0,...,n-1* we have that, necessarily, ``Ba.shape == Bchoices[i].shape``
+ for each `i`. Then, a new array with shape ``Ba.shape`` is created as
+ follows:
+
+ * if ``mode=raise`` (the default), then, first of all, each element of
+ `a` (and thus `Ba`) must be in the range `[0, n-1]`; now, suppose that
+ `i` (in that range) is the value at the `(j0, j1, ..., jm)` position
+ in `Ba` - then the value at the same position in the new array is the
+ value in `Bchoices[i]` at that same position;
+
+ * if ``mode=wrap``, values in `a` (and thus `Ba`) may be any (signed)
+ integer; modular arithmetic is used to map integers outside the range
+ `[0, n-1]` back into that range; and then the new array is constructed
+ as above;
+
+ * if ``mode=clip``, values in `a` (and thus `Ba`) may be any (signed)
+ integer; negative integers are mapped to 0; values greater than `n-1`
+ are mapped to `n-1`; and then the new array is constructed as above.
+
+ Parameters
+ ----------
+ a : int array
+ This array must contain integers in `[0, n-1]`, where `n` is the number
+ of choices, unless ``mode=wrap`` or ``mode=clip``, in which cases any
+ integers are permissible.
+ choices : sequence of arrays
+ Choice arrays. `a` and all of the choices must be broadcastable to the
+ same shape. If `choices` is itself an array (not recommended), then
+ its outermost dimension (i.e., the one corresponding to
+ ``choices.shape[0]``) is taken as defining the "sequence".
+ out : array, optional
+ If provided, the result will be inserted into this array. It should
+ be of the appropriate shape and dtype.
+ mode : {'raise' (default), 'wrap', 'clip'}, optional
+ Specifies how indices outside `[0, n-1]` will be treated:
+
+ * 'raise' : an exception is raised
+ * 'wrap' : value becomes value mod `n`
+ * 'clip' : values < 0 are mapped to 0, values > n-1 are mapped to n-1
+
+ Returns
+ -------
+ merged_array : array
+ The merged result.
+
+ Raises
+ ------
+ ValueError: shape mismatch
+ If `a` and each choice array are not all broadcastable to the same
+ shape.
+
+ See Also
+ --------
+ ndarray.choose : equivalent method
+
+ Notes
+ -----
+ To reduce the chance of misinterpretation, even though the following
+ "abuse" is nominally supported, `choices` should neither be, nor be
+ thought of as, a single array, i.e., the outermost sequence-like container
+ should be either a list or a tuple.
+
+ Examples
+ --------
+
+ >>> choices = [[0, 1, 2, 3], [10, 11, 12, 13],
+ ... [20, 21, 22, 23], [30, 31, 32, 33]]
+ >>> np.choose([2, 3, 1, 0], choices
+ ... # the first element of the result will be the first element of the
+ ... # third (2+1) "array" in choices, namely, 20; the second element
+ ... # will be the second element of the fourth (3+1) choice array, i.e.,
+ ... # 31, etc.
+ ... )
+ array([20, 31, 12, 3])
+ >>> np.choose([2, 4, 1, 0], choices, mode='clip') # 4 goes to 3 (4-1)
+ array([20, 31, 12, 3])
+ >>> # because there are 4 choice arrays
+ >>> np.choose([2, 4, 1, 0], choices, mode='wrap') # 4 goes to (4 mod 4)
+ array([20, 1, 12, 3])
+ >>> # i.e., 0
+
+ A couple examples illustrating how choose broadcasts:
+
+ >>> a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]]
+ >>> choices = [-10, 10]
+ >>> np.choose(a, choices)
+ array([[ 10, -10, 10],
+ [-10, 10, -10],
+ [ 10, -10, 10]])
+
+ >>> # With thanks to Anne Archibald
+ >>> a = np.array([0, 1]).reshape((2,1,1))
+ >>> c1 = np.array([1, 2, 3]).reshape((1,3,1))
+ >>> c2 = np.array([-1, -2, -3, -4, -5]).reshape((1,1,5))
+ >>> np.choose(a, (c1, c2)) # result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2
+ array([[[ 1, 1, 1, 1, 1],
+ [ 2, 2, 2, 2, 2],
+ [ 3, 3, 3, 3, 3]],
+ [[-1, -2, -3, -4, -5],
+ [-1, -2, -3, -4, -5],
+ [-1, -2, -3, -4, -5]]])
+
+ """
+ return _wrapfunc(a, 'choose', choices, out=out, mode=mode)
+
+
+def _repeat_dispatcher(a, repeats, axis=None):
+ return (a,)
+
+
+@array_function_dispatch(_repeat_dispatcher)
+def repeat(a, repeats, axis=None):
+ """
+ Repeat elements of an array.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ repeats : int or array of ints
+ The number of repetitions for each element. `repeats` is broadcasted
+ to fit the shape of the given axis.
+ axis : int, optional
+ The axis along which to repeat values. By default, use the
+ flattened input array, and return a flat output array.
+
+ Returns
+ -------
+ repeated_array : ndarray
+ Output array which has the same shape as `a`, except along
+ the given axis.
+
+ See Also
+ --------
+ tile : Tile an array.
+
+ Examples
+ --------
+ >>> np.repeat(3, 4)
+ array([3, 3, 3, 3])
+ >>> x = np.array([[1,2],[3,4]])
+ >>> np.repeat(x, 2)
+ array([1, 1, 2, 2, 3, 3, 4, 4])
+ >>> np.repeat(x, 3, axis=1)
+ array([[1, 1, 1, 2, 2, 2],
+ [3, 3, 3, 4, 4, 4]])
+ >>> np.repeat(x, [1, 2], axis=0)
+ array([[1, 2],
+ [3, 4],
+ [3, 4]])
+
+ """
+ return _wrapfunc(a, 'repeat', repeats, axis=axis)
+
+
+def _put_dispatcher(a, ind, v, mode=None):
+ return (a, ind, v)
+
+
+@array_function_dispatch(_put_dispatcher)
+def put(a, ind, v, mode='raise'):
+ """
+ Replaces specified elements of an array with given values.
+
+ The indexing works on the flattened target array. `put` is roughly
+ equivalent to:
+
+ ::
+
+ a.flat[ind] = v
+
+ Parameters
+ ----------
+ a : ndarray
+ Target array.
+ ind : array_like
+ Target indices, interpreted as integers.
+ v : array_like
+ Values to place in `a` at target indices. If `v` is shorter than
+ `ind` it will be repeated as necessary.
+ mode : {'raise', 'wrap', 'clip'}, optional
+ Specifies how out-of-bounds indices will behave.
+
+ * 'raise' -- raise an error (default)
+ * 'wrap' -- wrap around
+ * 'clip' -- clip to the range
+
+ 'clip' mode means that all indices that are too large are replaced
+ by the index that addresses the last element along that axis. Note
+ that this disables indexing with negative numbers.
+
+ See Also
+ --------
+ putmask, place
+ put_along_axis : Put elements by matching the array and the index arrays
+
+ Examples
+ --------
+ >>> a = np.arange(5)
+ >>> np.put(a, [0, 2], [-44, -55])
+ >>> a
+ array([-44, 1, -55, 3, 4])
+
+ >>> a = np.arange(5)
+ >>> np.put(a, 22, -5, mode='clip')
+ >>> a
+ array([ 0, 1, 2, 3, -5])
+
+ """
+ try:
+ put = a.put
+ except AttributeError:
+ raise TypeError("argument 1 must be numpy.ndarray, "
+ "not {name}".format(name=type(a).__name__))
+
+ return put(ind, v, mode=mode)
+
+
+def _swapaxes_dispatcher(a, axis1, axis2):
+ return (a,)
+
+
+@array_function_dispatch(_swapaxes_dispatcher)
+def swapaxes(a, axis1, axis2):
+ """
+ Interchange two axes of an array.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ axis1 : int
+ First axis.
+ axis2 : int
+ Second axis.
+
+ Returns
+ -------
+ a_swapped : ndarray
+ For NumPy >= 1.10.0, if `a` is an ndarray, then a view of `a` is
+ returned; otherwise a new array is created. For earlier NumPy
+ versions a view of `a` is returned only if the order of the
+ axes is changed, otherwise the input array is returned.
+
+ Examples
+ --------
+ >>> x = np.array([[1,2,3]])
+ >>> np.swapaxes(x,0,1)
+ array([[1],
+ [2],
+ [3]])
+
+ >>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]])
+ >>> x
+ array([[[0, 1],
+ [2, 3]],
+ [[4, 5],
+ [6, 7]]])
+
+ >>> np.swapaxes(x,0,2)
+ array([[[0, 4],
+ [2, 6]],
+ [[1, 5],
+ [3, 7]]])
+
+ """
+ return _wrapfunc(a, 'swapaxes', axis1, axis2)
+
+
+def _transpose_dispatcher(a, axes=None):
+ return (a,)
+
+
+@array_function_dispatch(_transpose_dispatcher)
+def transpose(a, axes=None):
+ """
+ Permute the dimensions of an array.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ axes : list of ints, optional
+ By default, reverse the dimensions, otherwise permute the axes
+ according to the values given.
+
+ Returns
+ -------
+ p : ndarray
+ `a` with its axes permuted. A view is returned whenever
+ possible.
+
+ See Also
+ --------
+ moveaxis
+ argsort
+
+ Notes
+ -----
+ Use `transpose(a, argsort(axes))` to invert the transposition of tensors
+ when using the `axes` keyword argument.
+
+ Transposing a 1-D array returns an unchanged view of the original array.
+
+ Examples
+ --------
+ >>> x = np.arange(4).reshape((2,2))
+ >>> x
+ array([[0, 1],
+ [2, 3]])
+
+ >>> np.transpose(x)
+ array([[0, 2],
+ [1, 3]])
+
+ >>> x = np.ones((1, 2, 3))
+ >>> np.transpose(x, (1, 0, 2)).shape
+ (2, 1, 3)
+
+ """
+ return _wrapfunc(a, 'transpose', axes)
+
+
+def _partition_dispatcher(a, kth, axis=None, kind=None, order=None):
+ return (a,)
+
+
+@array_function_dispatch(_partition_dispatcher)
+def partition(a, kth, axis=-1, kind='introselect', order=None):
+ """
+ Return a partitioned copy of an array.
+
+ Creates a copy of the array with its elements rearranged in such a
+ way that the value of the element in k-th position is in the
+ position it would be in a sorted array. All elements smaller than
+ the k-th element are moved before this element and all equal or
+ greater are moved behind it. The ordering of the elements in the two
+ partitions is undefined.
+
+ .. versionadded:: 1.8.0
+
+ Parameters
+ ----------
+ a : array_like
+ Array to be sorted.
+ kth : int or sequence of ints
+ Element index to partition by. The k-th value of the element
+ will be in its final sorted position and all smaller elements
+ will be moved before it and all equal or greater elements behind
+ it. The order of all elements in the partitions is undefined. If
+ provided with a sequence of k-th values it will partition all elements
+ indexed by those values into their sorted position at once.
+ axis : int or None, optional
+ Axis along which to sort. If None, the array is flattened before
+ sorting. The default is -1, which sorts along the last axis.
+ kind : {'introselect'}, optional
+ Selection algorithm. Default is 'introselect'.
+ order : str or list of str, optional
+ When `a` is an array with fields defined, this argument
+ specifies which fields to compare first, second, etc. A single
+ field can be specified as a string. Not all fields need be
+ specified, but unspecified fields will still be used, in the
+ order in which they come up in the dtype, to break ties.
+
+ Returns
+ -------
+ partitioned_array : ndarray
+ Array of the same type and shape as `a`.
+
+ See Also
+ --------
+ ndarray.partition : Method to sort an array in-place.
+ argpartition : Indirect partition.
+ sort : Full sorting
+
+ Notes
+ -----
+ The various selection algorithms are characterized by their average
+ speed, worst case performance, work space size, and whether they are
+ stable. A stable sort keeps items with the same key in the same
+ relative order. The available algorithms have the following
+ properties:
+
+ ================= ======= ============= ============ =======
+ kind speed worst case work space stable
+ ================= ======= ============= ============ =======
+ 'introselect' 1 O(n) 0 no
+ ================= ======= ============= ============ =======
+
+ All the partition algorithms make temporary copies of the data when
+ partitioning along any but the last axis. Consequently,
+ partitioning along the last axis is faster and uses less space than
+ partitioning along any other axis.
+
+ The sort order for complex numbers is lexicographic. If both the
+ real and imaginary parts are non-nan then the order is determined by
+ the real parts except when they are equal, in which case the order
+ is determined by the imaginary parts.
+
+ Examples
+ --------
+ >>> a = np.array([3, 4, 2, 1])
+ >>> np.partition(a, 3)
+ array([2, 1, 3, 4])
+
+ >>> np.partition(a, (1, 3))
+ array([1, 2, 3, 4])
+
+ """
+ if axis is None:
+ # flatten returns (1, N) for np.matrix, so always use the last axis
+ a = asanyarray(a).flatten()
+ axis = -1
+ else:
+ a = asanyarray(a).copy(order="K")
+ a.partition(kth, axis=axis, kind=kind, order=order)
+ return a
+
+
+def _argpartition_dispatcher(a, kth, axis=None, kind=None, order=None):
+ return (a,)
+
+
+@array_function_dispatch(_argpartition_dispatcher)
+def argpartition(a, kth, axis=-1, kind='introselect', order=None):
+ """
+ Perform an indirect partition along the given axis using the
+ algorithm specified by the `kind` keyword. It returns an array of
+ indices of the same shape as `a` that index data along the given
+ axis in partitioned order.
+
+ .. versionadded:: 1.8.0
+
+ Parameters
+ ----------
+ a : array_like
+ Array to sort.
+ kth : int or sequence of ints
+ Element index to partition by. The k-th element will be in its
+ final sorted position and all smaller elements will be moved
+ before it and all larger elements behind it. The order of all
+ elements in the partitions is undefined. If provided with a
+ sequence of k-th values it will partition all of them into their sorted
+ position at once.
+ axis : int or None, optional
+ Axis along which to sort. The default is -1 (the last axis). If
+ None, the flattened array is used.
+ kind : {'introselect'}, optional
+ Selection algorithm. Default is 'introselect'
+ order : str or list of str, optional
+ When `a` is an array with fields defined, this argument
+ specifies which fields to compare first, second, etc. A single
+ field can be specified as a string, and not all fields need be
+ specified, but unspecified fields will still be used, in the
+ order in which they come up in the dtype, to break ties.
+
+ Returns
+ -------
+ index_array : ndarray, int
+ Array of indices that partition `a` along the specified axis.
+ If `a` is one-dimensional, ``a[index_array]`` yields a partitioned `a`.
+ More generally, ``np.take_along_axis(a, index_array, axis=axis)`` always
+ yields the partitioned `a`, irrespective of dimensionality.
+
+ See Also
+ --------
+ partition : Describes partition algorithms used.
+ ndarray.partition : Inplace partition.
+ argsort : Full indirect sort
+
+ Notes
+ -----
+ See `partition` for notes on the different selection algorithms.
+
+ Examples
+ --------
+ One dimensional array:
+
+ >>> x = np.array([3, 4, 2, 1])
+ >>> x[np.argpartition(x, 3)]
+ array([2, 1, 3, 4])
+ >>> x[np.argpartition(x, (1, 3))]
+ array([1, 2, 3, 4])
+
+ >>> x = [3, 4, 2, 1]
+ >>> np.array(x)[np.argpartition(x, 3)]
+ array([2, 1, 3, 4])
+
+ """
+ return _wrapfunc(a, 'argpartition', kth, axis=axis, kind=kind, order=order)
+
+
+def _sort_dispatcher(a, axis=None, kind=None, order=None):
+ return (a,)
+
+
+@array_function_dispatch(_sort_dispatcher)
+def sort(a, axis=-1, kind='quicksort', order=None):
+ """
+ Return a sorted copy of an array.
+
+ Parameters
+ ----------
+ a : array_like
+ Array to be sorted.
+ axis : int or None, optional
+ Axis along which to sort. If None, the array is flattened before
+ sorting. The default is -1, which sorts along the last axis.
+ kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
+ Sorting algorithm. Default is 'quicksort'.
+ order : str or list of str, optional
+ When `a` is an array with fields defined, this argument specifies
+ which fields to compare first, second, etc. A single field can
+ be specified as a string, and not all fields need be specified,
+ but unspecified fields will still be used, in the order in which
+ they come up in the dtype, to break ties.
+
+ Returns
+ -------
+ sorted_array : ndarray
+ Array of the same type and shape as `a`.
+
+ See Also
+ --------
+ ndarray.sort : Method to sort an array in-place.
+ argsort : Indirect sort.
+ lexsort : Indirect stable sort on multiple keys.
+ searchsorted : Find elements in a sorted array.
+ partition : Partial sort.
+
+ Notes
+ -----
+ The various sorting algorithms are characterized by their average speed,
+ worst case performance, work space size, and whether they are stable. A
+ stable sort keeps items with the same key in the same relative
+ order. The three available algorithms have the following
+ properties:
+
+ =========== ======= ============= ============ ========
+ kind speed worst case work space stable
+ =========== ======= ============= ============ ========
+ 'quicksort' 1 O(n^2) 0 no
+ 'mergesort' 2 O(n*log(n)) ~n/2 yes
+ 'heapsort' 3 O(n*log(n)) 0 no
+ =========== ======= ============= ============ ========
+
+ All the sort algorithms make temporary copies of the data when
+ sorting along any but the last axis. Consequently, sorting along
+ the last axis is faster and uses less space than sorting along
+ any other axis.
+
+ The sort order for complex numbers is lexicographic. If both the real
+ and imaginary parts are non-nan then the order is determined by the
+ real parts except when they are equal, in which case the order is
+ determined by the imaginary parts.
+
+ Prior to numpy 1.4.0, sorting real and complex arrays containing nan
+ values led to undefined behaviour. In numpy versions >= 1.4.0 nan
+ values are sorted to the end. The extended sort order is:
+
+ * Real: [R, nan]
+ * Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj]
+
+ where R is a non-nan real value. Complex values with the same nan
+ placements are sorted according to the non-nan part if it exists.
+ Non-nan values are sorted as before.
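+
+ For example, on a real array the nan is pushed to the end:
+
+ >>> np.sort(np.array([3., np.nan, 1.])) # doctest: +SKIP
+ array([ 1., 3., nan])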
+
+ .. versionadded:: 1.12.0
+
+ quicksort has been changed to an introsort which will switch to
+ heapsort when it does not make enough progress. This makes its
+ worst case O(n*log(n)).
+
+ 'stable' automatically chooses the best stable sorting algorithm
+ for the data type being sorted. It is currently mapped to
+ merge sort.
+
+ Examples
+ --------
+ >>> a = np.array([[1,4],[3,1]])
+ >>> np.sort(a) # sort along the last axis
+ array([[1, 4],
+ [1, 3]])
+ >>> np.sort(a, axis=None) # sort the flattened array
+ array([1, 1, 3, 4])
+ >>> np.sort(a, axis=0) # sort along the first axis
+ array([[1, 1],
+ [3, 4]])
+
+ Use the `order` keyword to specify a field to use when sorting a
+ structured array:
+
+ >>> dtype = [('name', 'S10'), ('height', float), ('age', int)]
+ >>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38),
+ ... ('Galahad', 1.7, 38)]
+ >>> a = np.array(values, dtype=dtype) # create a structured array
+ >>> np.sort(a, order='height') # doctest: +SKIP
+ array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
+ ('Lancelot', 1.8999999999999999, 38)],
+ dtype=[('name', '|S10'), ('height', '>> np.sort(a, order=['age', 'height']) # doctest: +SKIP
+ array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38),
+ ('Arthur', 1.8, 41)],
+ dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
+
+ """
+ return _wrapfunc(a, 'sort', axis=axis, kind=kind, order=order)
+
+
+def _argsort_dispatcher(a, axis=None, kind=None, order=None):
+ return (a,)
+
+
+@array_function_dispatch(_argsort_dispatcher)
+def argsort(a, axis=-1, kind='quicksort', order=None):
+ """
+ Returns the indices that would sort an array.
+
+ Perform an indirect sort along the given axis using the algorithm
+ specified by the `kind` keyword. It returns an array of indices of
+ the same shape as `a` that index data along the given axis in sorted
+ order.
+
+ Parameters
+ ----------
+ a : array_like
+ Array to sort.
+ axis : int or None, optional
+ Axis along which to sort. The default is -1 (the last axis). If
+ None, the flattened array is used.
+ kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
+ Sorting algorithm.
+ order : str or list of str, optional
+ When `a` is an array with fields defined, this argument specifies
+ which fields to compare first, second, etc. A single field can
+ be specified as a string, and not all fields need be specified,
+ but unspecified fields will still be used, in the order in which
+ they come up in the dtype, to break ties.
+
+ Returns
+ -------
+ index_array : ndarray, int
+ Array of indices that sort `a` along the specified `axis`.
+ If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.
+ More generally, ``np.take_along_axis(a, index_array, axis=axis)``
+ always yields the sorted `a`, irrespective of dimensionality.
+
+ See Also
+ --------
+ sort : Describes sorting algorithms used.
+ lexsort : Indirect stable sort with multiple keys.
+ ndarray.sort : Inplace sort.
+ argpartition : Indirect partial sort.
+
+ Notes
+ -----
+ See `sort` for notes on the different sorting algorithms.
+
+ As of NumPy 1.4.0 `argsort` works with real/complex arrays containing
+ nan values. The enhanced sort order is documented in `sort`.
+
+ Examples
+ --------
+ One dimensional array:
+
+ >>> x = np.array([3, 1, 2])
+ >>> np.argsort(x)
+ array([1, 2, 0])
+
+ Two-dimensional array:
+
+ >>> x = np.array([[0, 3], [2, 2]])
+ >>> x
+ array([[0, 3],
+ [2, 2]])
+
+ >>> np.argsort(x, axis=0) # sorts along first axis (down)
+ array([[0, 1],
+ [1, 0]])
+
+ >>> np.argsort(x, axis=1) # sorts along last axis (across)
+ array([[0, 1],
+ [0, 1]])
+
+ Indices of the sorted elements of an N-dimensional array:
+
+ >>> ind = np.unravel_index(np.argsort(x, axis=None), x.shape)
+ >>> ind
+ (array([0, 1, 1, 0]), array([0, 0, 1, 1]))
+ >>> x[ind] # same as np.sort(x, axis=None)
+ array([0, 2, 2, 3])
+
+ Sorting with keys:
+
+ >>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')])
+ >>> x
+ array([(1, 0), (0, 1)],
+ dtype=[('x', '<i4'), ('y', '<i4')])
+
+ >>> np.argsort(x, order=('x','y'))
+ array([1, 0])
+
+ >>> np.argsort(x, order=('y','x'))
+ array([0, 1])
+
+ """
+ return _wrapfunc(a, 'argsort', axis=axis, kind=kind, order=order)
+
+
+def _argmax_dispatcher(a, axis=None, out=None):
+ return (a, out)
+
+
+@array_function_dispatch(_argmax_dispatcher)
+def argmax(a, axis=None, out=None):
+ """
+ Returns the indices of the maximum values along an axis.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ axis : int, optional
+ By default, the index is into the flattened array, otherwise
+ along the specified axis.
+ out : array, optional
+ If provided, the result will be inserted into this array. It should
+ be of the appropriate shape and dtype.
+
+ Returns
+ -------
+ index_array : ndarray of ints
+ Array of indices into the array. It has the same shape as `a`,
+ with the dimension along `axis` removed.
+
+ See Also
+ --------
+ ndarray.argmax, argmin
+ amax : The maximum value along a given axis.
+ unravel_index : Convert a flat index into an index tuple.
+
+ Notes
+ -----
+ In case of multiple occurrences of the maximum values, the indices
+ corresponding to the first occurrence are returned.
+
+ Examples
+ --------
+ >>> a = np.arange(6).reshape(2,3) + 10
+ >>> a
+ array([[10, 11, 12],
+ [13, 14, 15]])
+ >>> np.argmax(a)
+ 5
+ >>> np.argmax(a, axis=0)
+ array([1, 1, 1])
+ >>> np.argmax(a, axis=1)
+ array([2, 2])
+
+ Indices of the maximal elements of an N-dimensional array:
+
+ >>> ind = np.unravel_index(np.argmax(a, axis=None), a.shape)
+ >>> ind
+ (1, 2)
+ >>> a[ind]
+ 15
+
+ >>> b = np.arange(6)
+ >>> b[1] = 5
+ >>> b
+ array([0, 5, 2, 3, 4, 5])
+ >>> np.argmax(b) # Only the first occurrence is returned.
+ 1
+
+ """
+ return _wrapfunc(a, 'argmax', axis=axis, out=out)
+
+
+def _argmin_dispatcher(a, axis=None, out=None):
+ return (a, out)
+
+
+@array_function_dispatch(_argmin_dispatcher)
+def argmin(a, axis=None, out=None):
+ """
+ Returns the indices of the minimum values along an axis.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ axis : int, optional
+ By default, the index is into the flattened array, otherwise
+ along the specified axis.
+ out : array, optional
+ If provided, the result will be inserted into this array. It should
+ be of the appropriate shape and dtype.
+
+ Returns
+ -------
+ index_array : ndarray of ints
+ Array of indices into the array. It has the same shape as `a`,
+ with the dimension along `axis` removed.
+
+ See Also
+ --------
+ ndarray.argmin, argmax
+ amin : The minimum value along a given axis.
+ unravel_index : Convert a flat index into an index tuple.
+
+ Notes
+ -----
+ In case of multiple occurrences of the minimum values, the indices
+ corresponding to the first occurrence are returned.
+
+ Examples
+ --------
+ >>> a = np.arange(6).reshape(2,3) + 10
+ >>> a
+ array([[10, 11, 12],
+ [13, 14, 15]])
+ >>> np.argmin(a)
+ 0
+ >>> np.argmin(a, axis=0)
+ array([0, 0, 0])
+ >>> np.argmin(a, axis=1)
+ array([0, 0])
+
+ Indices of the minimum elements of an N-dimensional array:
+
+ >>> ind = np.unravel_index(np.argmin(a, axis=None), a.shape)
+ >>> ind
+ (0, 0)
+ >>> a[ind]
+ 10
+
+ >>> b = np.arange(6) + 10
+ >>> b[4] = 10
+ >>> b
+ array([10, 11, 12, 13, 10, 15])
+ >>> np.argmin(b) # Only the first occurrence is returned.
+ 0
+
+ """
+ return _wrapfunc(a, 'argmin', axis=axis, out=out)
+
+
+def _searchsorted_dispatcher(a, v, side=None, sorter=None):
+ return (a, v, sorter)
+
+
+@array_function_dispatch(_searchsorted_dispatcher)
+def searchsorted(a, v, side='left', sorter=None):
+ """
+ Find indices where elements should be inserted to maintain order.
+
+ Find the indices into a sorted array `a` such that, if the
+ corresponding elements in `v` were inserted before the indices, the
+ order of `a` would be preserved.
+
+ Assuming that `a` is sorted:
+
+ ====== ============================
+ `side` returned index `i` satisfies
+ ====== ============================
+ left ``a[i-1] < v <= a[i]``
+ right ``a[i-1] <= v < a[i]``
+ ====== ============================
+
+ Parameters
+ ----------
+ a : 1-D array_like
+ Input array. If `sorter` is None, then it must be sorted in
+ ascending order, otherwise `sorter` must be an array of indices
+ that sort it.
+ v : array_like
+ Values to insert into `a`.
+ side : {'left', 'right'}, optional
+ If 'left', the index of the first suitable location found is given.
+ If 'right', return the last such index. If there is no suitable
+ index, return either 0 or N (where N is the length of `a`).
+ sorter : 1-D array_like, optional
+ Optional array of integer indices that sort array a into ascending
+ order. They are typically the result of argsort.
+
+ .. versionadded:: 1.7.0
+
+ Returns
+ -------
+ indices : array of ints
+ Array of insertion points with the same shape as `v`.
+
+ See Also
+ --------
+ sort : Return a sorted copy of an array.
+ histogram : Produce histogram from 1-D data.
+
+ Notes
+ -----
+ Binary search is used to find the required insertion points.
+
+ As of NumPy 1.4.0 `searchsorted` works with real/complex arrays containing
+ `nan` values. The enhanced sort order is documented in `sort`.
+
+ This function is a faster version of the built-in Python
+ `bisect.bisect_left` (``side='left'``) and `bisect.bisect_right`
+ (``side='right'``) functions, and is also vectorized in the `v`
+ argument.
+
+ Examples
+ --------
+ >>> np.searchsorted([1,2,3,4,5], 3)
+ 2
+ >>> np.searchsorted([1,2,3,4,5], 3, side='right')
+ 3
+ >>> np.searchsorted([1,2,3,4,5], [-10, 10, 2, 3])
+ array([0, 5, 1, 2])
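+
+ When `a` is not itself sorted, a permutation from `argsort` can be
+ passed as `sorter`; the returned index then refers to the sorted order
+ (illustrative values):
+
+ >>> a = np.array([40, 10, 30, 20])
+ >>> np.searchsorted(a, 25, sorter=np.argsort(a))
+ 2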
+
+ """
+ return _wrapfunc(a, 'searchsorted', v, side=side, sorter=sorter)
+
+
+def _resize_dispatcher(a, new_shape):
+ return (a,)
+
+
+@array_function_dispatch(_resize_dispatcher)
+def resize(a, new_shape):
+ """
+ Return a new array with the specified shape.
+
+ If the new array is larger than the original array, then the new
+ array is filled with repeated copies of `a`. Note that this behavior
+ is different from a.resize(new_shape) which fills with zeros instead
+ of repeated copies of `a`.
+
+ Parameters
+ ----------
+ a : array_like
+ Array to be resized.
+
+ new_shape : int or tuple of int
+ Shape of resized array.
+
+ Returns
+ -------
+ reshaped_array : ndarray
+ The new array is formed from the data in the old array, repeated
+ if necessary to fill out the required number of elements. The
+ data are repeated in the order that they are stored in memory.
+
+ See Also
+ --------
+ ndarray.resize : resize an array in-place.
+
+ Notes
+ -----
+ Warning: This functionality does **not** consider axes separately,
+ i.e. it does not apply interpolation/extrapolation.
+ It fills the return array with the required number of elements, taken
+ from `a` as they are laid out in memory, disregarding strides and axes.
+ (This describes the case where the new shape is smaller; for a larger
+ shape, see above.)
+ This functionality is therefore not suitable to resize images,
+ or data where each axis represents a separate and distinct entity.
+
+ Examples
+ --------
+ >>> a=np.array([[0,1],[2,3]])
+ >>> np.resize(a,(2,3))
+ array([[0, 1, 2],
+ [3, 0, 1]])
+ >>> np.resize(a,(1,4))
+ array([[0, 1, 2, 3]])
+ >>> np.resize(a,(2,4))
+ array([[0, 1, 2, 3],
+ [0, 1, 2, 3]])
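+
+ As noted above, the fill order ignores the axis structure of `a` and
+ simply walks the flattened data (an illustration):
+
+ >>> np.resize(a, (4, 1))
+ array([[0],
+ [1],
+ [2],
+ [3]])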
+
+ """
+ if isinstance(new_shape, (int, nt.integer)):
+ new_shape = (new_shape,)
+ a = ravel(a)
+ Na = len(a)
+ total_size = um.multiply.reduce(new_shape)
+ if Na == 0 or total_size == 0:
+ return mu.zeros(new_shape, a.dtype)
+
+ n_copies = int(total_size / Na)
+ extra = total_size % Na
+
+ if extra != 0:
+ n_copies = n_copies + 1
+ extra = Na - extra
+
+ a = concatenate((a,) * n_copies)
+ if extra > 0:
+ a = a[:-extra]
+
+ return reshape(a, new_shape)
+
+
+def _squeeze_dispatcher(a, axis=None):
+ return (a,)
+
+
+@array_function_dispatch(_squeeze_dispatcher)
+def squeeze(a, axis=None):
+ """
+ Remove single-dimensional entries from the shape of an array.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data.
+ axis : None or int or tuple of ints, optional
+ .. versionadded:: 1.7.0
+
+ Selects a subset of the single-dimensional entries in the
+ shape. If an axis is selected with shape entry greater than
+ one, an error is raised.
+
+ Returns
+ -------
+ squeezed : ndarray
+ The input array, but with all or a subset of the
+ dimensions of length 1 removed. This is always `a` itself
+ or a view into `a`.
+
+ Raises
+ ------
+ ValueError
+ If `axis` is not `None`, and an axis being squeezed is not of length 1
+
+ See Also
+ --------
+ expand_dims : The inverse operation, adding singleton dimensions
+ reshape : Insert, remove, and combine dimensions, and resize existing ones
+
+ Examples
+ --------
+ >>> x = np.array([[[0], [1], [2]]])
+ >>> x.shape
+ (1, 3, 1)
+ >>> np.squeeze(x).shape
+ (3,)
+ >>> np.squeeze(x, axis=0).shape
+ (3, 1)
+ >>> np.squeeze(x, axis=1).shape
+ Traceback (most recent call last):
+ ...
+ ValueError: cannot select an axis to squeeze out which has size not equal to one
+ >>> np.squeeze(x, axis=2).shape
+ (1, 3)
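+
+ `axis` may also be a tuple of ints, removing several length-1 axes at
+ once:
+
+ >>> np.squeeze(x, axis=(0, 2)).shape
+ (3,)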
+
+ """
+ try:
+ squeeze = a.squeeze
+ except AttributeError:
+ return _wrapit(a, 'squeeze')
+ if axis is None:
+ return squeeze()
+ else:
+ return squeeze(axis=axis)
+
+
+def _diagonal_dispatcher(a, offset=None, axis1=None, axis2=None):
+ return (a,)
+
+
+@array_function_dispatch(_diagonal_dispatcher)
+def diagonal(a, offset=0, axis1=0, axis2=1):
+ """
+ Return specified diagonals.
+
+ If `a` is 2-D, returns the diagonal of `a` with the given offset,
+ i.e., the collection of elements of the form ``a[i, i+offset]``. If
+ `a` has more than two dimensions, then the axes specified by `axis1`
+ and `axis2` are used to determine the 2-D sub-array whose diagonal is
+ returned. The shape of the resulting array can be determined by
+ removing `axis1` and `axis2` and appending an index to the right equal
+ to the size of the resulting diagonals.
+
+ In versions of NumPy prior to 1.7, this function always returned a new,
+ independent array containing a copy of the values in the diagonal.
+
+ In NumPy 1.7 and 1.8, it continues to return a copy of the diagonal,
+ but depending on this fact is deprecated. Writing to the resulting
+ array continues to work as it used to, but a FutureWarning is issued.
+
+ Starting in NumPy 1.9 it returns a read-only view on the original array.
+ Attempting to write to the resulting array will produce an error.
+
+ In some future release, it will return a read/write view and writing to
+ the returned array will alter your original array. The returned array
+ will have the same type as the input array.
+
+ If you don't write to the array returned by this function, then you can
+ just ignore all of the above.
+
+ If you depend on the current behavior, then we suggest copying the
+ returned array explicitly, i.e., use ``np.diagonal(a).copy()`` instead
+ of just ``np.diagonal(a)``. This will work with both past and future
+ versions of NumPy.
+
+ Parameters
+ ----------
+ a : array_like
+ Array from which the diagonals are taken.
+ offset : int, optional
+ Offset of the diagonal from the main diagonal. Can be positive or
+ negative. Defaults to main diagonal (0).
+ axis1 : int, optional
+ Axis to be used as the first axis of the 2-D sub-arrays from which
+ the diagonals should be taken. Defaults to first axis (0).
+ axis2 : int, optional
+ Axis to be used as the second axis of the 2-D sub-arrays from
+ which the diagonals should be taken. Defaults to second axis (1).
+
+ Returns
+ -------
+ array_of_diagonals : ndarray
+ If `a` is 2-D, then a 1-D array containing the diagonal and of the
+ same type as `a` is returned unless `a` is a `matrix`, in which case
+ a 1-D array rather than a (2-D) `matrix` is returned in order to
+ maintain backward compatibility.
+
+ If ``a.ndim > 2``, then the dimensions specified by `axis1` and `axis2`
+ are removed, and a new axis inserted at the end corresponding to the
+ diagonal.
+
+ Raises
+ ------
+ ValueError
+ If the dimension of `a` is less than 2.
+
+ See Also
+ --------
+ diag : MATLAB work-a-like for 1-D and 2-D arrays.
+ diagflat : Create diagonal arrays.
+ trace : Sum along diagonals.
+
+ Examples
+ --------
+ >>> a = np.arange(4).reshape(2,2)
+ >>> a
+ array([[0, 1],
+ [2, 3]])
+ >>> a.diagonal()
+ array([0, 3])
+ >>> a.diagonal(1)
+ array([1])
+
+ A 3-D example:
+
+ >>> a = np.arange(8).reshape(2,2,2); a
+ array([[[0, 1],
+ [2, 3]],
+ [[4, 5],
+ [6, 7]]])
+ >>> a.diagonal(0, # Main diagonals of two arrays created by skipping
+ ... 0, # across the outer(left)-most axis last and
+ ... 1) # the "middle" (row) axis first.
+ array([[0, 6],
+ [1, 7]])
+
+ The sub-arrays whose main diagonals we just obtained; note that each
+ corresponds to fixing the right-most (column) axis, and that the
+ diagonals are "packed" in rows.
+
+ >>> a[:,:,0] # main diagonal is [0 6]
+ array([[0, 2],
+ [4, 6]])
+ >>> a[:,:,1] # main diagonal is [1 7]
+ array([[1, 3],
+ [5, 7]])
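+
+ As described above, the result is a read-only view in NumPy 1.9 and
+ later, so the recommended pattern for a writeable result is an
+ explicit copy:
+
+ >>> np.diagonal(a).flags.writeable
+ False
+ >>> np.diagonal(a).copy().flags.writeable
+ True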
+
+ """
+ if isinstance(a, np.matrix):
+ # Make diagonal of matrix 1-D to preserve backward compatibility.
+ return asarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2)
+ else:
+ return asanyarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2)
+
+
+def _trace_dispatcher(
+ a, offset=None, axis1=None, axis2=None, dtype=None, out=None):
+ return (a, out)
+
+
+@array_function_dispatch(_trace_dispatcher)
+def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
+ """
+ Return the sum along diagonals of the array.
+
+ If `a` is 2-D, the sum along its diagonal with the given offset
+ is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i.
+
+ If `a` has more than two dimensions, then the axes specified by axis1 and
+ axis2 are used to determine the 2-D sub-arrays whose traces are returned.
+ The shape of the resulting array is the same as that of `a` with `axis1`
+ and `axis2` removed.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array, from which the diagonals are taken.
+ offset : int, optional
+ Offset of the diagonal from the main diagonal. Can be both positive
+ and negative. Defaults to 0.
+ axis1, axis2 : int, optional
+ Axes to be used as the first and second axis of the 2-D sub-arrays
+ from which the diagonals should be taken. Defaults are the first two
+ axes of `a`.
+ dtype : dtype, optional
+ Determines the data-type of the returned array and of the accumulator
+ where the elements are summed. If dtype has the value None and `a` is
+ of integer type of precision less than the default integer
+ precision, then the default integer precision is used. Otherwise,
+ the precision is the same as that of `a`.
+ out : ndarray, optional
+ Array into which the output is placed. Its type is preserved and
+ it must be of the right shape to hold the output.
+
+ Returns
+ -------
+ sum_along_diagonals : ndarray
+ If `a` is 2-D, the sum along the diagonal is returned. If `a` has
+ larger dimensions, then an array of sums along diagonals is returned.
+
+ See Also
+ --------
+ diag, diagonal, diagflat
+
+ Examples
+ --------
+ >>> np.trace(np.eye(3))
+ 3.0
+ >>> a = np.arange(8).reshape((2,2,2))
+ >>> np.trace(a)
+ array([6, 8])
+
+ >>> a = np.arange(24).reshape((2,2,2,3))
+ >>> np.trace(a).shape
+ (2, 3)
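+
+ The `offset` argument selects an off-diagonal, e.g. the elements
+ ``a[i, i+1]`` of a 2-D array:
+
+ >>> a = np.arange(9).reshape((3,3))
+ >>> np.trace(a, offset=1) # a[0, 1] + a[1, 2] = 1 + 5
+ 6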
+
+ """
+ if isinstance(a, np.matrix):
+ # Get trace of matrix via an array to preserve backward compatibility.
+ return asarray(a).trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out)
+ else:
+ return asanyarray(a).trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out)
+
+
+def _ravel_dispatcher(a, order=None):
+ return (a,)
+
+
+@array_function_dispatch(_ravel_dispatcher)
+def ravel(a, order='C'):
+ """Return a contiguous flattened array.
+
+ A 1-D array, containing the elements of the input, is returned. A copy is
+ made only if needed.
+
+ As of NumPy 1.10, the returned array will have the same type as the input
+ array. (for example, a masked array will be returned for a masked array
+ input)
+
+ Parameters
+ ----------
+ a : array_like
+ Input array. The elements in `a` are read in the order specified by
+ `order`, and packed as a 1-D array.
+ order : {'C','F', 'A', 'K'}, optional
+
+ The elements of `a` are read using this index order. 'C' means
+ to index the elements in row-major, C-style order,
+ with the last axis index changing fastest, back to the first
+ axis index changing slowest. 'F' means to index the elements
+ in column-major, Fortran-style order, with the
+ first index changing fastest, and the last index changing
+ slowest. Note that the 'C' and 'F' options take no account of
+ the memory layout of the underlying array, and only refer to
+ the order of axis indexing. 'A' means to read the elements in
+ Fortran-like index order if `a` is Fortran *contiguous* in
+ memory, C-like order otherwise. 'K' means to read the
+ elements in the order they occur in memory, except for
+ reversing the data when strides are negative. By default, 'C'
+ index order is used.
+
+ Returns
+ -------
+ y : array_like
+ y is an array of the same subtype as `a`, with shape ``(a.size,)``.
+ Note that matrices are special cased for backward compatibility: if `a`
+ is a matrix, then y is a 1-D ndarray.
+
+ See Also
+ --------
+ ndarray.flat : 1-D iterator over an array.
+ ndarray.flatten : 1-D array copy of the elements of an array
+ in row-major order.
+ ndarray.reshape : Change the shape of an array without changing its data.
+
+ Notes
+ -----
+ In row-major, C-style order, in two dimensions, the row index
+ varies the slowest, and the column index the quickest. This can
+ be generalized to multiple dimensions, where row-major order
+ implies that the index along the first axis varies slowest, and
+ the index along the last quickest. The opposite holds for
+ column-major, Fortran-style index ordering.
+
+ When a view is desired in as many cases as possible, ``arr.reshape(-1)``
+ may be preferable.
+
+ Examples
+ --------
+ It is equivalent to ``reshape(-1, order=order)``.
+
+ >>> x = np.array([[1, 2, 3], [4, 5, 6]])
+ >>> print(np.ravel(x))
+ [1 2 3 4 5 6]
+
+ >>> print(x.reshape(-1))
+ [1 2 3 4 5 6]
+
+ >>> print(np.ravel(x, order='F'))
+ [1 4 2 5 3 6]
+
+ When ``order`` is 'A', it will preserve the array's 'C' or 'F' ordering:
+
+ >>> print(np.ravel(x.T))
+ [1 4 2 5 3 6]
+ >>> print(np.ravel(x.T, order='A'))
+ [1 2 3 4 5 6]
+
+ When ``order`` is 'K', it will preserve orderings that are neither 'C'
+ nor 'F', but won't reverse axes:
+
+ >>> a = np.arange(3)[::-1]; a
+ array([2, 1, 0])
+ >>> a.ravel(order='C')
+ array([2, 1, 0])
+ >>> a.ravel(order='K')
+ array([2, 1, 0])
+
+ >>> a = np.arange(12).reshape(2,3,2).swapaxes(1,2); a
+ array([[[ 0, 2, 4],
+ [ 1, 3, 5]],
+ [[ 6, 8, 10],
+ [ 7, 9, 11]]])
+ >>> a.ravel(order='C')
+ array([ 0, 2, 4, 1, 3, 5, 6, 8, 10, 7, 9, 11])
+ >>> a.ravel(order='K')
+ array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
+
+ """
+ if isinstance(a, np.matrix):
+ return asarray(a).ravel(order=order)
+ else:
+ return asanyarray(a).ravel(order=order)
+
+
+def _nonzero_dispatcher(a):
+ return (a,)
+
+
+@array_function_dispatch(_nonzero_dispatcher)
+def nonzero(a):
+ """
+ Return the indices of the elements that are non-zero.
+
+ Returns a tuple of arrays, one for each dimension of `a`,
+ containing the indices of the non-zero elements in that
+ dimension. The values in `a` are always tested and returned in
+ row-major, C-style order. The corresponding non-zero
+ values can be obtained with::
+
+ a[nonzero(a)]
+
+ To group the indices by element, rather than dimension, use::
+
+ transpose(nonzero(a))
+
+ The result of this is always a 2-D array, with a row for
+ each non-zero element.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+
+ Returns
+ -------
+ tuple_of_arrays : tuple
+ Indices of elements that are non-zero.
+
+ See Also
+ --------
+ flatnonzero :
+ Return indices that are non-zero in the flattened version of the input
+ array.
+ ndarray.nonzero :
+ Equivalent ndarray method.
+ count_nonzero :
+ Counts the number of non-zero elements in the input array.
+
+ Examples
+ --------
+ >>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]])
+ >>> x
+ array([[3, 0, 0],
+ [0, 4, 0],
+ [5, 6, 0]])
+ >>> np.nonzero(x)
+ (array([0, 1, 2, 2]), array([0, 1, 0, 1]))
+
+ >>> x[np.nonzero(x)]
+ array([3, 4, 5, 6])
+ >>> np.transpose(np.nonzero(x))
+ array([[0, 0],
+ [1, 1],
+ [2, 0],
+ [2, 1]])
+
+ A common use for ``nonzero`` is to find the indices of an array where
+ a condition is True. Given an array `a`, the condition ``a > 3`` is a
+ boolean array and, since False is interpreted as 0, ``np.nonzero(a > 3)``
+ yields the indices of `a` where the condition is true.
+
+ >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+ >>> a > 3
+ array([[False, False, False],
+ [ True, True, True],
+ [ True, True, True]])
+ >>> np.nonzero(a > 3)
+ (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
+
+ Using this result to index `a` is equivalent to using the mask directly:
+
+ >>> a[np.nonzero(a > 3)]
+ array([4, 5, 6, 7, 8, 9])
+ >>> a[a > 3] # prefer this spelling
+ array([4, 5, 6, 7, 8, 9])
+
+ ``nonzero`` can also be called as a method of the array.
+
+ >>> (a > 3).nonzero()
+ (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
+
+ """
+ return _wrapfunc(a, 'nonzero')
+
+
+def _shape_dispatcher(a):
+ return (a,)
+
+
+@array_function_dispatch(_shape_dispatcher)
+def shape(a):
+ """
+ Return the shape of an array.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+
+ Returns
+ -------
+ shape : tuple of ints
+ The elements of the shape tuple give the lengths of the
+ corresponding array dimensions.
+
+ See Also
+ --------
+ alen
+ ndarray.shape : Equivalent array method.
+
+ Examples
+ --------
+ >>> np.shape(np.eye(3))
+ (3, 3)
+ >>> np.shape([[1, 2]])
+ (1, 2)
+ >>> np.shape([0])
+ (1,)
+ >>> np.shape(0)
+ ()
+
+ >>> a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
+ >>> np.shape(a)
+ (2,)
+ >>> a.shape
+ (2,)
+
+ """
+ try:
+ result = a.shape
+ except AttributeError:
+ result = asarray(a).shape
+ return result
+
+
+def _compress_dispatcher(condition, a, axis=None, out=None):
+ return (condition, a, out)
+
+
+@array_function_dispatch(_compress_dispatcher)
+def compress(condition, a, axis=None, out=None):
+ """
+ Return selected slices of an array along given axis.
+
+ When working along a given axis, a slice along that axis is returned in
+ `output` for each index where `condition` evaluates to True. When
+ working on a 1-D array, `compress` is equivalent to `extract`.
+
+ Parameters
+ ----------
+ condition : 1-D array of bools
+ Array that selects which entries to return. If len(condition)
+ is less than the size of `a` along the given axis, then output is
+ truncated to the length of the condition array.
+ a : array_like
+ Array from which to extract a part.
+ axis : int, optional
+ Axis along which to take slices. If None (default), work on the
+ flattened array.
+ out : ndarray, optional
+ Output array. Its type is preserved and it must be of the right
+ shape to hold the output.
+
+ Returns
+ -------
+ compressed_array : ndarray
+ A copy of `a` without the slices along axis for which `condition`
+ is false.
+
+ See Also
+ --------
+ take, choose, diag, diagonal, select
+ ndarray.compress : Equivalent method in ndarray
+ extract : Equivalent function when working on 1-D arrays
+ numpy.doc.ufuncs : Section "Output arguments"
+
+ Examples
+ --------
+ >>> a = np.array([[1, 2], [3, 4], [5, 6]])
+ >>> a
+ array([[1, 2],
+ [3, 4],
+ [5, 6]])
+ >>> np.compress([0, 1], a, axis=0)
+ array([[3, 4]])
+ >>> np.compress([False, True, True], a, axis=0)
+ array([[3, 4],
+ [5, 6]])
+ >>> np.compress([False, True], a, axis=1)
+ array([[2],
+ [4],
+ [6]])
+
+ Working on the flattened array does not return slices along an axis but
+ selects elements.
+
+ >>> np.compress([False, True], a)
+ array([2])
+
+ """
+ return _wrapfunc(a, 'compress', condition, axis=axis, out=out)
+
+
+def _clip_dispatcher(a, a_min, a_max, out=None):
+ return (a, a_min, a_max)
+
+
+@array_function_dispatch(_clip_dispatcher)
+def clip(a, a_min, a_max, out=None):
+ """
+ Clip (limit) the values in an array.
+
+ Given an interval, values outside the interval are clipped to
+ the interval edges. For example, if an interval of ``[0, 1]``
+ is specified, values smaller than 0 become 0, and values larger
+ than 1 become 1.
+
+ Parameters
+ ----------
+ a : array_like
+ Array containing elements to clip.
+ a_min : scalar or array_like or `None`
+ Minimum value. If `None`, clipping is not performed on lower
+ interval edge. Not more than one of `a_min` and `a_max` may be
+ `None`.
+ a_max : scalar or array_like or `None`
+ Maximum value. If `None`, clipping is not performed on upper
+ interval edge. Not more than one of `a_min` and `a_max` may be
+ `None`. If `a_min` or `a_max` are array_like, then the three
+ arrays will be broadcasted to match their shapes.
+ out : ndarray, optional
+ The results will be placed in this array. It may be the input
+ array for in-place clipping. `out` must be of the right shape
+ to hold the output. Its type is preserved.
+
+ Returns
+ -------
+ clipped_array : ndarray
+ An array with the elements of `a`, but where values
+ < `a_min` are replaced with `a_min`, and those > `a_max`
+ with `a_max`.
+
+ See Also
+ --------
+ numpy.doc.ufuncs : Section "Output arguments"
+
+ Examples
+ --------
+ >>> a = np.arange(10)
+ >>> np.clip(a, 1, 8)
+ array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8])
+ >>> a
+ array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+ >>> np.clip(a, 3, 6, out=a)
+ array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6])
+ >>> a = np.arange(10)
+ >>> a
+ array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+ >>> np.clip(a, [3, 4, 1, 1, 1, 4, 4, 4, 4, 4], 8)
+ array([3, 4, 2, 3, 4, 5, 6, 7, 8, 8])
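+
+ One bound may be ``None`` to clip on a single side only (at most one of
+ `a_min` and `a_max` may be ``None``; an illustration):
+
+ >>> np.clip(a, None, 6) # doctest: +SKIP
+ array([0, 1, 2, 3, 4, 5, 6, 6, 6, 6])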
+
+ """
+ return _wrapfunc(a, 'clip', a_min, a_max, out=out)
+
+
+def _sum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
+ initial=None):
+ return (a, out)
+
+
+@array_function_dispatch(_sum_dispatcher)
+def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._NoValue):
+ """
+ Sum of array elements over a given axis.
+
+ Parameters
+ ----------
+ a : array_like
+ Elements to sum.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which a sum is performed. The default,
+ axis=None, will sum all of the elements of the input array. If
+ axis is negative it counts from the last to the first axis.
+
+ .. versionadded:: 1.7.0
+
+ If axis is a tuple of ints, a sum is performed on all of the axes
+ specified in the tuple instead of a single axis or all the axes as
+ before.
+ dtype : dtype, optional
+ The type of the returned array and of the accumulator in which the
+ elements are summed. The dtype of `a` is used by default unless `a`
+ has an integer dtype of less precision than the default platform
+ integer. In that case, if `a` is signed then the platform integer
+ is used while if `a` is unsigned then an unsigned integer of the
+ same precision as the platform integer is used.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must have
+ the same shape as the expected output, but the type of the output
+ values will be cast if necessary.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the input array.
+
+ If the default value is passed, then `keepdims` will not be
+ passed through to the `sum` method of sub-classes of
+ `ndarray`, however any non-default value will be. If the
+ sub-class' method does not implement `keepdims` any
+ exceptions will be raised.
+ initial : scalar, optional
+ Starting value for the sum. See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.15.0
+
+ Returns
+ -------
+ sum_along_axis : ndarray
+ An array with the same shape as `a`, with the specified
+ axis removed. If `a` is a 0-d array, or if `axis` is None, a scalar
+ is returned. If an output array is specified, a reference to
+ `out` is returned.
+
+ See Also
+ --------
+ ndarray.sum : Equivalent method.
+
+ cumsum : Cumulative sum of array elements.
+
+ trapz : Integration of array values using the composite trapezoidal rule.
+
+ mean, average
+
+ Notes
+ -----
+ Arithmetic is modular when using integer types, and no error is
+ raised on overflow.
+
+ The sum of an empty array is the neutral element 0:
+
+ >>> np.sum([])
+ 0.0
+
+ Examples
+ --------
+ >>> np.sum([0.5, 1.5])
+ 2.0
+ >>> np.sum([0.5, 0.7, 0.2, 1.5], dtype=np.int32)
+ 1
+ >>> np.sum([[0, 1], [0, 5]])
+ 6
+ >>> np.sum([[0, 1], [0, 5]], axis=0)
+ array([0, 6])
+ >>> np.sum([[0, 1], [0, 5]], axis=1)
+ array([1, 5])
+
+ If the accumulator is too small, overflow occurs:
+
+ >>> np.ones(128, dtype=np.int8).sum(dtype=np.int8)
+ -128
+
+ You can also start the sum with a value other than zero:
+
+ >>> np.sum([10], initial=5)
+ 15
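+
+ `keepdims` retains the reduced axis with length one, and a tuple `axis`
+ sums over several axes at once:
+
+ >>> np.sum([[0, 1], [0, 5]], axis=0, keepdims=True)
+ array([[0, 6]])
+ >>> np.sum([[0, 1], [0, 5]], axis=(0, 1))
+ 6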
+ """
+ if isinstance(a, _gentype):
+ # 2018-02-25, 1.15.0
+ warnings.warn(
+ "Calling np.sum(generator) is deprecated, and in the future will give a different result. "
+ "Use np.sum(np.fromiter(generator)) or the python sum builtin instead.",
+ DeprecationWarning, stacklevel=2)
+
+ res = _sum_(a)
+ if out is not None:
+ out[...] = res
+ return out
+ return res
+
+ return _wrapreduction(a, np.add, 'sum', axis, dtype, out, keepdims=keepdims,
+ initial=initial)
+
+
+def _any_dispatcher(a, axis=None, out=None, keepdims=None):
+ return (a, out)
+
+
+@array_function_dispatch(_any_dispatcher)
+def any(a, axis=None, out=None, keepdims=np._NoValue):
+ """
+ Test whether any array element along a given axis evaluates to True.
+
+ Returns a single boolean unless `axis` is not ``None``.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array or object that can be converted to an array.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which a logical OR reduction is performed.
+ The default (`axis` = `None`) is to perform a logical OR over all
+ the dimensions of the input array. `axis` may be negative, in
+ which case it counts from the last to the first axis.
+
+ .. versionadded:: 1.7.0
+
+ If this is a tuple of ints, a reduction is performed on multiple
+ axes, instead of a single axis or all the axes as before.
+ out : ndarray, optional
+ Alternate output array in which to place the result. It must have
+ the same shape as the expected output and its type is preserved
+ (e.g., if it is of type float, then it will remain so, returning
+ 1.0 for True and 0.0 for False, regardless of the type of `a`).
+ See `doc.ufuncs` (Section "Output arguments") for details.
+
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the input array.
+
+ If the default value is passed, then `keepdims` will not be
+ passed through to the `any` method of sub-classes of
+ `ndarray`, however any non-default value will be. If the
+ sub-class' method does not implement `keepdims` any
+ exceptions will be raised.
+
+ Returns
+ -------
+ any : bool or ndarray
+ A new boolean or `ndarray` is returned unless `out` is specified,
+ in which case a reference to `out` is returned.
+
+ See Also
+ --------
+ ndarray.any : equivalent method
+
+ all : Test whether all elements along a given axis evaluate to True.
+
+ Notes
+ -----
+ Not a Number (NaN), positive infinity and negative infinity evaluate
+ to `True` because these are not equal to zero.
+
+ Examples
+ --------
+ >>> np.any([[True, False], [True, True]])
+ True
+
+ >>> np.any([[True, False], [False, False]], axis=0)
+ array([ True, False])
+
+ >>> np.any([-1, 0, 5])
+ True
+
+ >>> np.any(np.nan)
+ True
+
+ >>> o=np.array([False])
+ >>> z=np.any([-1, 4, 5], out=o)
+ >>> z, o
+ (array([ True]), array([ True]))
+ >>> # Check now that z is a reference to o
+ >>> z is o
+ True
+ >>> id(z), id(o) # identity of z and o # doctest: +SKIP
+ (191614240, 191614240)
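+
+ With `keepdims` the reduced axis is retained with length one, so the
+ result broadcasts against the input:
+
+ >>> np.any([[True, False], [False, False]], axis=1, keepdims=True)
+ array([[ True],
+ [False]])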
+
+ """
+ return _wrapreduction(a, np.logical_or, 'any', axis, None, out, keepdims=keepdims)
+
+
+def _all_dispatcher(a, axis=None, out=None, keepdims=None):
+ return (a, out)
+
+
+@array_function_dispatch(_all_dispatcher)
+def all(a, axis=None, out=None, keepdims=np._NoValue):
+ """
+ Test whether all array elements along a given axis evaluate to True.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array or object that can be converted to an array.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which a logical AND reduction is performed.
+ The default (`axis` = `None`) is to perform a logical AND over all
+ the dimensions of the input array. `axis` may be negative, in
+ which case it counts from the last to the first axis.
+
+ .. versionadded:: 1.7.0
+
+ If this is a tuple of ints, a reduction is performed on multiple
+ axes, instead of a single axis or all the axes as before.
+ out : ndarray, optional
+ Alternate output array in which to place the result.
+ It must have the same shape as the expected output and its
+ type is preserved (e.g., if ``dtype(out)`` is float, the result
+ will consist of 0.0's and 1.0's). See `doc.ufuncs` (Section
+ "Output arguments") for more details.
+
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the input array.
+
+ If the default value is passed, then `keepdims` will not be
+ passed through to the `all` method of sub-classes of
+ `ndarray`, however any non-default value will be. If the
+ sub-class' method does not implement `keepdims` any
+ exceptions will be raised.
+
+ Returns
+ -------
+ all : ndarray, bool
+ A new boolean or array is returned unless `out` is specified,
+ in which case a reference to `out` is returned.
+
+ See Also
+ --------
+ ndarray.all : equivalent method
+
+ any : Test whether any element along a given axis evaluates to True.
+
+ Notes
+ -----
+ Not a Number (NaN), positive infinity and negative infinity
+ evaluate to `True` because these are not equal to zero.
+
+ Examples
+ --------
+ >>> np.all([[True,False],[True,True]])
+ False
+
+ >>> np.all([[True,False],[True,True]], axis=0)
+ array([ True, False])
+
+ >>> np.all([-1, 4, 5])
+ True
+
+ >>> np.all([1.0, np.nan])
+ True
+
+ >>> o=np.array([False])
+ >>> z=np.all([-1, 4, 5], out=o)
+ >>> id(z), id(o), z # doctest: +SKIP
+ (28293632, 28293632, array([ True]))
+
+ """
+ return _wrapreduction(a, np.logical_and, 'all', axis, None, out, keepdims=keepdims)
+
+
+def _cumsum_dispatcher(a, axis=None, dtype=None, out=None):
+ return (a, out)
+
+
+@array_function_dispatch(_cumsum_dispatcher)
+def cumsum(a, axis=None, dtype=None, out=None):
+ """
+ Return the cumulative sum of the elements along a given axis.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ axis : int, optional
+ Axis along which the cumulative sum is computed. The default
+ (None) is to compute the cumsum over the flattened array.
+ dtype : dtype, optional
+ Type of the returned array and of the accumulator in which the
+ elements are summed. If `dtype` is not specified, it defaults
+ to the dtype of `a`, unless `a` has an integer dtype with a
+ precision less than that of the default platform integer. In
+ that case, the default platform integer is used.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must
+ have the same shape and buffer length as the expected output
+ but the type will be cast if necessary. See `doc.ufuncs`
+ (Section "Output arguments") for more details.
+
+ Returns
+ -------
+ cumsum_along_axis : ndarray.
+ A new array holding the result is returned unless `out` is
+ specified, in which case a reference to `out` is returned. The
+ result has the same size as `a`, and the same shape as `a` if
+ `axis` is not None or `a` is a 1-d array.
+
+
+ See Also
+ --------
+ sum : Sum array elements.
+
+ trapz : Integration of array values using the composite trapezoidal rule.
+
+ diff : Calculate the n-th discrete difference along given axis.
+
+ Notes
+ -----
+ Arithmetic is modular when using integer types, and no error is
+ raised on overflow.
+
+ Examples
+ --------
+ >>> a = np.array([[1,2,3], [4,5,6]])
+ >>> a
+ array([[1, 2, 3],
+ [4, 5, 6]])
+ >>> np.cumsum(a)
+ array([ 1, 3, 6, 10, 15, 21])
+ >>> np.cumsum(a, dtype=float) # specifies type of output value(s)
+ array([ 1., 3., 6., 10., 15., 21.])
+
+ >>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns
+ array([[1, 2, 3],
+ [5, 7, 9]])
+ >>> np.cumsum(a,axis=1) # sum over columns for each of the 2 rows
+ array([[ 1, 3, 6],
+ [ 4, 9, 15]])
+
+ """
+ return _wrapfunc(a, 'cumsum', axis=axis, dtype=dtype, out=out)
+
+
+def _ptp_dispatcher(a, axis=None, out=None, keepdims=None):
+ return (a, out)
+
+
+@array_function_dispatch(_ptp_dispatcher)
+def ptp(a, axis=None, out=None, keepdims=np._NoValue):
+ """
+ Range of values (maximum - minimum) along an axis.
+
+ The name of the function comes from the abbreviation for 'peak to peak'.
+
+ Parameters
+ ----------
+ a : array_like
+ Input values.
+ axis : None or int or tuple of ints, optional
+ Axis along which to find the peaks. By default, flatten the
+ array. `axis` may be negative, in
+ which case it counts from the last to the first axis.
+
+ .. versionadded:: 1.15.0
+
+ If this is a tuple of ints, a reduction is performed on multiple
+ axes, instead of a single axis or all the axes as before.
+ out : array_like
+ Alternative output array in which to place the result. It must
+ have the same shape and buffer length as the expected output,
+ but the type of the output values will be cast if necessary.
+
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the input array.
+
+ If the default value is passed, then `keepdims` will not be
+ passed through to the `ptp` method of sub-classes of
+ `ndarray`, however any non-default value will be. If the
+ sub-class' method does not implement `keepdims` any
+ exceptions will be raised.
+
+ Returns
+ -------
+ ptp : ndarray
+ A new array holding the result, unless `out` was
+ specified, in which case a reference to `out` is returned.
+
+ Examples
+ --------
+ >>> x = np.arange(4).reshape((2,2))
+ >>> x
+ array([[0, 1],
+ [2, 3]])
+
+ >>> np.ptp(x, axis=0)
+ array([2, 2])
+
+ >>> np.ptp(x, axis=1)
+ array([1, 1])
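+
+ Without `axis` the range of the flattened array is returned, and
+ `keepdims` retains the reduced axis with length one:
+
+ >>> np.ptp(x)
+ 3
+ >>> np.ptp(x, axis=1, keepdims=True)
+ array([[1],
+ [1]])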
+
+ """
+ kwargs = {}
+ if keepdims is not np._NoValue:
+ kwargs['keepdims'] = keepdims
+ if type(a) is not mu.ndarray:
+ try:
+ ptp = a.ptp
+ except AttributeError:
+ pass
+ else:
+ return ptp(axis=axis, out=out, **kwargs)
+ return _methods._ptp(a, axis=axis, out=out, **kwargs)
+
+
+def _amax_dispatcher(a, axis=None, out=None, keepdims=None, initial=None):
+ return (a, out)
+
+
+@array_function_dispatch(_amax_dispatcher)
+def amax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue):
+ """
+ Return the maximum of an array or maximum along an axis.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which to operate. By default, flattened input is
+ used.
+
+ .. versionadded:: 1.7.0
+
+ If this is a tuple of ints, the maximum is selected over multiple axes,
+ instead of a single axis or all the axes as before.
+ out : ndarray, optional
+ Alternative output array in which to place the result. Must
+ be of the same shape and buffer length as the expected output.
+ See `doc.ufuncs` (Section "Output arguments") for more details.
+
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the input array.
+
+ If the default value is passed, then `keepdims` will not be
+ passed through to the `amax` method of sub-classes of
+ `ndarray`, however any non-default value will be. If the
+ sub-class' method does not implement `keepdims` any
+ exceptions will be raised.
+
+ initial : scalar, optional
+ The minimum value of an output element. Must be present to allow
+ computation on empty slice. See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.15.0
+
+
+ Returns
+ -------
+ amax : ndarray or scalar
+ Maximum of `a`. If `axis` is None, the result is a scalar value.
+ If `axis` is given, the result is an array of dimension
+ ``a.ndim - 1``.
+
+ See Also
+ --------
+ amin :
+ The minimum value of an array along a given axis, propagating any NaNs.
+ nanmax :
+ The maximum value of an array along a given axis, ignoring any NaNs.
+ maximum :
+ Element-wise maximum of two arrays, propagating any NaNs.
+ fmax :
+ Element-wise maximum of two arrays, ignoring any NaNs.
+ argmax :
+ Return the indices of the maximum values.
+
+ nanmin, minimum, fmin
+
+ Notes
+ -----
+ NaN values are propagated, that is if at least one item is NaN, the
+ corresponding max value will be NaN as well. To ignore NaN values
+ (MATLAB behavior), please use nanmax.
+
+ Don't use `amax` for element-wise comparison of 2 arrays; when
+ ``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than
+ ``amax(a, axis=0)``.
+
+ Examples
+ --------
+ >>> a = np.arange(4).reshape((2,2))
+ >>> a
+ array([[0, 1],
+ [2, 3]])
+ >>> np.amax(a) # Maximum of the flattened array
+ 3
+ >>> np.amax(a, axis=0) # Maxima along the first axis
+ array([2, 3])
+ >>> np.amax(a, axis=1) # Maxima along the second axis
+ array([1, 3])
+
+ >>> b = np.arange(5, dtype=float)
+ >>> b[2] = np.NaN
+ >>> np.amax(b)
+ nan
+ >>> np.nanmax(b)
+ 4.0
+
+ You can use an initial value to compute the maximum of an empty slice, or
+ to initialize it to a different value:
+
+ >>> np.max([[-50], [10]], axis=-1, initial=0)
+ array([ 0, 10])
+
+ Notice that the initial value is used as one of the elements for which the
+ maximum is determined, unlike the default argument of Python's ``max``
+ function, which is only used for empty iterables.
+
+ >>> np.max([5], initial=6)
+ 6
+ >>> max([5], default=6)
+ 5
+ """
+ return _wrapreduction(a, np.maximum, 'max', axis, None, out, keepdims=keepdims,
+ initial=initial)
+
+
+def _amin_dispatcher(a, axis=None, out=None, keepdims=None, initial=None):
+ return (a, out)
+
+
+@array_function_dispatch(_amin_dispatcher)
+def amin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue):
+ """
+ Return the minimum of an array or minimum along an axis.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which to operate. By default, flattened input is
+ used.
+
+ .. versionadded:: 1.7.0
+
+ If this is a tuple of ints, the minimum is selected over multiple axes,
+ instead of a single axis or all the axes as before.
+ out : ndarray, optional
+ Alternative output array in which to place the result. Must
+ be of the same shape and buffer length as the expected output.
+ See `doc.ufuncs` (Section "Output arguments") for more details.
+
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the input array.
+
+ If the default value is passed, then `keepdims` will not be
+ passed through to the `amin` method of sub-classes of
+ `ndarray`, however any non-default value will be. If the
+ sub-class' method does not implement `keepdims` any
+ exceptions will be raised.
+
+ initial : scalar, optional
+ The maximum value of an output element. Must be present to allow
+ computation on empty slice. See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.15.0
+
+ Returns
+ -------
+ amin : ndarray or scalar
+ Minimum of `a`. If `axis` is None, the result is a scalar value.
+ If `axis` is given, the result is an array of dimension
+ ``a.ndim - 1``.
+
+ See Also
+ --------
+ amax :
+ The maximum value of an array along a given axis, propagating any NaNs.
+ nanmin :
+ The minimum value of an array along a given axis, ignoring any NaNs.
+ minimum :
+ Element-wise minimum of two arrays, propagating any NaNs.
+ fmin :
+ Element-wise minimum of two arrays, ignoring any NaNs.
+ argmin :
+ Return the indices of the minimum values.
+
+ nanmax, maximum, fmax
+
+ Notes
+ -----
+ NaN values are propagated, that is if at least one item is NaN, the
+ corresponding min value will be NaN as well. To ignore NaN values
+ (MATLAB behavior), please use nanmin.
+
+ Don't use `amin` for element-wise comparison of 2 arrays; when
+ ``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than
+ ``amin(a, axis=0)``.
+
+ Examples
+ --------
+ >>> a = np.arange(4).reshape((2,2))
+ >>> a
+ array([[0, 1],
+ [2, 3]])
+ >>> np.amin(a) # Minimum of the flattened array
+ 0
+ >>> np.amin(a, axis=0) # Minima along the first axis
+ array([0, 1])
+ >>> np.amin(a, axis=1) # Minima along the second axis
+ array([0, 2])
+
+ >>> b = np.arange(5, dtype=float)
+ >>> b[2] = np.NaN
+ >>> np.amin(b)
+ nan
+ >>> np.nanmin(b)
+ 0.0
+
+ >>> np.min([[-50], [10]], axis=-1, initial=0)
+ array([-50, 0])
+
+ Notice that the initial value is used as one of the elements for which the
+ minimum is determined, unlike the default argument of Python's ``min``
+ function, which is only used for empty iterables. Note that this is not
+ the same as Python's ``default`` argument.
+
+ >>> np.min([6], initial=5)
+ 5
+ >>> min([6], default=5)
+ 6
+ """
+ return _wrapreduction(a, np.minimum, 'min', axis, None, out, keepdims=keepdims,
+ initial=initial)
+
+
+def _alen_dispatcher(a):
+ return (a,)
+
+
+@array_function_dispatch(_alen_dispatcher)
+def alen(a):
+ """
+ Return the length of the first dimension of the input array.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+
+ Returns
+ -------
+ alen : int
+ Length of the first dimension of `a`.
+
+ See Also
+ --------
+ shape, size
+
+ Examples
+ --------
+ >>> a = np.zeros((7,4,5))
+ >>> a.shape[0]
+ 7
+ >>> np.alen(a)
+ 7
+
+ """
+ try:
+ return len(a)
+ except TypeError:
+ return len(array(a, ndmin=1))
+
+
+def _prod_dispatcher(
+ a, axis=None, dtype=None, out=None, keepdims=None, initial=None):
+ return (a, out)
+
+
+@array_function_dispatch(_prod_dispatcher)
+def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._NoValue):
+ """
+ Return the product of array elements over a given axis.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which a product is performed. The default,
+ axis=None, will calculate the product of all the elements in the
+ input array. If axis is negative it counts from the last to the
+ first axis.
+
+ .. versionadded:: 1.7.0
+
+ If axis is a tuple of ints, a product is performed on all of the
+ axes specified in the tuple instead of a single axis or all the
+ axes as before.
+ dtype : dtype, optional
+ The type of the returned array, as well as of the accumulator in
+ which the elements are multiplied. The dtype of `a` is used by
+ default unless `a` has an integer dtype of less precision than the
+ default platform integer. In that case, if `a` is signed then the
+ platform integer is used while if `a` is unsigned then an unsigned
+ integer of the same precision as the platform integer is used.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must have
+ the same shape as the expected output, but the type of the output
+ values will be cast if necessary.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left in the
+ result as dimensions with size one. With this option, the result
+ will broadcast correctly against the input array.
+
+ If the default value is passed, then `keepdims` will not be
+ passed through to the `prod` method of sub-classes of
+ `ndarray`, however any non-default value will be. If the
+ sub-class' method does not implement `keepdims` any
+ exceptions will be raised.
+ initial : scalar, optional
+ The starting value for this product. See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.15.0
+
+ Returns
+ -------
+ product_along_axis : ndarray, see `dtype` parameter above.
+ An array shaped as `a` but with the specified axis removed.
+ Returns a reference to `out` if specified.
+
+ See Also
+ --------
+ ndarray.prod : equivalent method
+ numpy.doc.ufuncs : Section "Output arguments"
+
+ Notes
+ -----
+ Arithmetic is modular when using integer types, and no error is
+ raised on overflow. That means that, on a 32-bit platform:
+
+ >>> x = np.array([536870910, 536870910, 536870910, 536870910])
+ >>> np.prod(x) # random
+ 16
+
+ The product of an empty array is the neutral element 1:
+
+ >>> np.prod([])
+ 1.0
+
+ Examples
+ --------
+ By default, calculate the product of all elements:
+
+ >>> np.prod([1.,2.])
+ 2.0
+
+ Even when the input array is two-dimensional:
+
+ >>> np.prod([[1.,2.],[3.,4.]])
+ 24.0
+
+ But we can also specify the axis over which to multiply:
+
+ >>> np.prod([[1.,2.],[3.,4.]], axis=1)
+ array([ 2., 12.])
+
+ If the type of `x` is unsigned, then the output type is
+ the unsigned platform integer:
+
+ >>> x = np.array([1, 2, 3], dtype=np.uint8)
+ >>> np.prod(x).dtype == np.uint
+ True
+
+ If `x` is of a signed integer type, then the output type
+ is the default platform integer:
+
+ >>> x = np.array([1, 2, 3], dtype=np.int8)
+ >>> np.prod(x).dtype == int
+ True
+
+ You can also start the product with a value other than one:
+
+ >>> np.prod([1, 2], initial=5)
+ 10
+ """
+ return _wrapreduction(a, np.multiply, 'prod', axis, dtype, out, keepdims=keepdims,
+ initial=initial)
+
+
+def _cumprod_dispatcher(a, axis=None, dtype=None, out=None):
+ return (a, out)
+
+
+@array_function_dispatch(_cumprod_dispatcher)
+def cumprod(a, axis=None, dtype=None, out=None):
+ """
+ Return the cumulative product of elements along a given axis.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ axis : int, optional
+ Axis along which the cumulative product is computed. By default
+ the input is flattened.
+ dtype : dtype, optional
+ Type of the returned array, as well as of the accumulator in which
+ the elements are multiplied. If *dtype* is not specified, it
+ defaults to the dtype of `a`, unless `a` has an integer dtype with
+ a precision less than that of the default platform integer. In
+ that case, the default platform integer is used instead.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must
+ have the same shape and buffer length as the expected output
+ but the type of the resulting values will be cast if necessary.
+
+ Returns
+ -------
+ cumprod : ndarray
+ A new array holding the result is returned unless `out` is
+ specified, in which case a reference to out is returned.
+
+ See Also
+ --------
+ numpy.doc.ufuncs : Section "Output arguments"
+
+ Notes
+ -----
+ Arithmetic is modular when using integer types, and no error is
+ raised on overflow.
+
+ Examples
+ --------
+ >>> a = np.array([1,2,3])
+ >>> np.cumprod(a) # intermediate results 1, 1*2
+ ... # total product 1*2*3 = 6
+ array([1, 2, 6])
+ >>> a = np.array([[1, 2, 3], [4, 5, 6]])
+ >>> np.cumprod(a, dtype=float) # specify type of output
+ array([ 1., 2., 6., 24., 120., 720.])
+
+ The cumulative product for each column (i.e., over the rows) of `a`:
+
+ >>> np.cumprod(a, axis=0)
+ array([[ 1, 2, 3],
+ [ 4, 10, 18]])
+
+ The cumulative product for each row (i.e. over the columns) of `a`:
+
+ >>> np.cumprod(a,axis=1)
+ array([[ 1, 2, 6],
+ [ 4, 20, 120]])
+
+ """
+ return _wrapfunc(a, 'cumprod', axis=axis, dtype=dtype, out=out)
+
+
+def _ndim_dispatcher(a):
+ return (a,)
+
+
+@array_function_dispatch(_ndim_dispatcher)
+def ndim(a):
+ """
+ Return the number of dimensions of an array.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array. If it is not already an ndarray, a conversion is
+ attempted.
+
+ Returns
+ -------
+ number_of_dimensions : int
+ The number of dimensions in `a`. Scalars are zero-dimensional.
+
+ See Also
+ --------
+ ndarray.ndim : equivalent method
+ shape : dimensions of array
+ ndarray.shape : dimensions of array
+
+ Examples
+ --------
+ >>> np.ndim([[1,2,3],[4,5,6]])
+ 2
+ >>> np.ndim(np.array([[1,2,3],[4,5,6]]))
+ 2
+ >>> np.ndim(1)
+ 0
+
+ """
+ try:
+ return a.ndim
+ except AttributeError:
+ return asarray(a).ndim
+
+
+def _size_dispatcher(a, axis=None):
+ return (a,)
+
+
+@array_function_dispatch(_size_dispatcher)
+def size(a, axis=None):
+ """
+ Return the number of elements along a given axis.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data.
+ axis : int, optional
+ Axis along which the elements are counted. By default, give
+ the total number of elements.
+
+ Returns
+ -------
+ element_count : int
+ Number of elements along the specified axis.
+
+ See Also
+ --------
+ shape : dimensions of array
+ ndarray.shape : dimensions of array
+ ndarray.size : number of elements in array
+
+ Examples
+ --------
+ >>> a = np.array([[1,2,3],[4,5,6]])
+ >>> np.size(a)
+ 6
+ >>> np.size(a,1)
+ 3
+ >>> np.size(a,0)
+ 2
+
+ """
+ if axis is None:
+ try:
+ return a.size
+ except AttributeError:
+ return asarray(a).size
+ else:
+ try:
+ return a.shape[axis]
+ except AttributeError:
+ return asarray(a).shape[axis]
+
+
+def _around_dispatcher(a, decimals=None, out=None):
+ return (a, out)
+
+
+@array_function_dispatch(_around_dispatcher)
+def around(a, decimals=0, out=None):
+ """
+ Evenly round to the given number of decimals.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data.
+ decimals : int, optional
+ Number of decimal places to round to (default: 0). If
+ decimals is negative, it specifies the number of positions to
+ the left of the decimal point.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must have
+ the same shape as the expected output, but the type of the output
+ values will be cast if necessary. See `doc.ufuncs` (Section
+ "Output arguments") for details.
+
+ Returns
+ -------
+ rounded_array : ndarray
+ An array of the same type as `a`, containing the rounded values.
+ Unless `out` was specified, a new array is created. A reference to
+ the result is returned.
+
+ The real and imaginary parts of complex numbers are rounded
+ separately. The result of rounding a float is a float.
+
+ See Also
+ --------
+ ndarray.round : equivalent method
+
+ ceil, fix, floor, rint, trunc
+
+
+ Notes
+ -----
+ For values exactly halfway between rounded decimal values, NumPy
+ rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,
+ -0.5 and 0.5 round to 0.0, etc. Results may also be surprising due
+ to the inexact representation of decimal fractions in the IEEE
+ floating point standard [1]_ and errors introduced when scaling
+ by powers of ten.
+
+ References
+ ----------
+ .. [1] "Lecture Notes on the Status of IEEE 754", William Kahan,
+ https://people.eecs.berkeley.edu/~wkahan/ieee754status/IEEE754.PDF
+ .. [2] "How Futile are Mindless Assessments of
+ Roundoff in Floating-Point Computation?", William Kahan,
+ https://people.eecs.berkeley.edu/~wkahan/Mindless.pdf
+
+ Examples
+ --------
+ >>> np.around([0.37, 1.64])
+ array([ 0., 2.])
+ >>> np.around([0.37, 1.64], decimals=1)
+ array([ 0.4, 1.6])
+ >>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value
+ array([ 0., 2., 2., 4., 4.])
+ >>> np.around([1,2,3,11], decimals=1) # ndarray of ints is returned
+ array([ 1, 2, 3, 11])
+ >>> np.around([1,2,3,11], decimals=-1)
+ array([ 0, 0, 0, 10])
+
+ """
+ return _wrapfunc(a, 'round', decimals=decimals, out=out)
+
+
+def _mean_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None):
+ return (a, out)
+
+
+@array_function_dispatch(_mean_dispatcher)
+def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
+ """
+ Compute the arithmetic mean along the specified axis.
+
+ Returns the average of the array elements. The average is taken over
+ the flattened array by default, otherwise over the specified axis.
+ `float64` intermediate and return values are used for integer inputs.
+
+ Parameters
+ ----------
+ a : array_like
+ Array containing numbers whose mean is desired. If `a` is not an
+ array, a conversion is attempted.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which the means are computed. The default is to
+ compute the mean of the flattened array.
+
+ .. versionadded:: 1.7.0
+
+ If this is a tuple of ints, a mean is performed over multiple axes,
+ instead of a single axis or all the axes as before.
+ dtype : data-type, optional
+ Type to use in computing the mean. For integer inputs, the default
+ is `float64`; for floating point inputs, it is the same as the
+ input dtype.
+ out : ndarray, optional
+ Alternate output array in which to place the result. The default
+ is ``None``; if provided, it must have the same shape as the
+ expected output, but the type will be cast if necessary.
+ See `doc.ufuncs` for details.
+
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the input array.
+
+ If the default value is passed, then `keepdims` will not be
+ passed through to the `mean` method of sub-classes of
+ `ndarray`; however, any non-default value will be. If the
+ sub-class's method does not implement `keepdims`, any
+ exceptions will be raised.
+
+ Returns
+ -------
+ m : ndarray, see dtype parameter above
+ If `out=None`, returns a new array containing the mean values,
+ otherwise a reference to the output array is returned.
+
+ See Also
+ --------
+ average : Weighted average
+ std, var, nanmean, nanstd, nanvar
+
+ Notes
+ -----
+ The arithmetic mean is the sum of the elements along the axis divided
+ by the number of elements.
+
+ Note that for floating-point input, the mean is computed using the
+ same precision the input has. Depending on the input data, this can
+ cause the results to be inaccurate, especially for `float32` (see
+ example below). Specifying a higher-precision accumulator using the
+ `dtype` keyword can alleviate this issue.
+
+ By default, `float16` results are computed using `float32` intermediates
+ for extra precision.
+
+ Examples
+ --------
+ >>> a = np.array([[1, 2], [3, 4]])
+ >>> np.mean(a)
+ 2.5
+ >>> np.mean(a, axis=0)
+ array([ 2., 3.])
+ >>> np.mean(a, axis=1)
+ array([ 1.5, 3.5])
+
+ In single precision, `mean` can be inaccurate:
+
+ >>> a = np.zeros((2, 512*512), dtype=np.float32)
+ >>> a[0, :] = 1.0
+ >>> a[1, :] = 0.1
+ >>> np.mean(a)
+ 0.54999924
+
+ Computing the mean in float64 is more accurate:
+
+ >>> np.mean(a, dtype=np.float64)
+ 0.55000000074505806
+
+ """
+ kwargs = {}
+ if keepdims is not np._NoValue:
+ kwargs['keepdims'] = keepdims
+ if type(a) is not mu.ndarray:
+ try:
+ mean = a.mean
+ except AttributeError:
+ pass
+ else:
+ return mean(axis=axis, dtype=dtype, out=out, **kwargs)
+
+ return _methods._mean(a, axis=axis, dtype=dtype,
+ out=out, **kwargs)
+
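+# Editor's note (not part of upstream NumPy): `np._NoValue` above is a
+# sentinel that distinguishes "keepdims not given" from any explicit value.
+# The kwarg is only forwarded when the caller actually passed it, so ndarray
+# subclasses whose .mean() predates `keepdims` keep working; the same
+# pattern repeats in `std` and `var` below.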
+
+def _std_dispatcher(
+ a, axis=None, dtype=None, out=None, ddof=None, keepdims=None):
+ return (a, out)
+
+
+@array_function_dispatch(_std_dispatcher)
+def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
+ """
+ Compute the standard deviation along the specified axis.
+
+ Returns the standard deviation, a measure of the spread of a distribution,
+ of the array elements. The standard deviation is computed for the
+ flattened array by default, otherwise over the specified axis.
+
+ Parameters
+ ----------
+ a : array_like
+ Calculate the standard deviation of these values.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which the standard deviation is computed. The
+ default is to compute the standard deviation of the flattened array.
+
+ .. versionadded:: 1.7.0
+
+ If this is a tuple of ints, a standard deviation is performed over
+ multiple axes, instead of a single axis or all the axes as before.
+ dtype : dtype, optional
+ Type to use in computing the standard deviation. For arrays of
+ integer type the default is float64, for arrays of float types it is
+ the same as the array type.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must have
+ the same shape as the expected output but the type (of the calculated
+ values) will be cast if necessary.
+ ddof : int, optional
+ Means Delta Degrees of Freedom. The divisor used in calculations
+ is ``N - ddof``, where ``N`` represents the number of elements.
+ By default `ddof` is zero.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the input array.
+
+ If the default value is passed, then `keepdims` will not be
+ passed through to the `std` method of sub-classes of
+ `ndarray`; however, any non-default value will be. If the
+ sub-class's method does not implement `keepdims`, any
+ exceptions will be raised.
+
+ Returns
+ -------
+ standard_deviation : ndarray, see dtype parameter above.
+ If `out` is None, return a new array containing the standard deviation,
+ otherwise return a reference to the output array.
+
+ See Also
+ --------
+ var, mean, nanmean, nanstd, nanvar
+ numpy.doc.ufuncs : Section "Output arguments"
+
+ Notes
+ -----
+ The standard deviation is the square root of the average of the squared
+ deviations from the mean, i.e., ``std = sqrt(mean(abs(x - x.mean())**2))``.
+
+ The average squared deviation is normally calculated as
+ ``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is specified,
+ the divisor ``N - ddof`` is used instead. In standard statistical
+ practice, ``ddof=1`` provides an unbiased estimator of the variance
+ of the infinite population. ``ddof=0`` provides a maximum likelihood
+ estimate of the variance for normally distributed variables. The
+ standard deviation computed in this function is the square root of
+ the estimated variance, so even with ``ddof=1``, it will not be an
+ unbiased estimate of the standard deviation per se.
+
+ Note that, for complex numbers, `std` takes the absolute
+ value before squaring, so that the result is always real and nonnegative.
+
+ For floating-point input, the *std* is computed using the same
+ precision the input has. Depending on the input data, this can cause
+ the results to be inaccurate, especially for float32 (see example below).
+ Specifying a higher-accuracy accumulator using the `dtype` keyword can
+ alleviate this issue.
+
+ Examples
+ --------
+ >>> a = np.array([[1, 2], [3, 4]])
+ >>> np.std(a)
+ 1.1180339887498949
+ >>> np.std(a, axis=0)
+ array([ 1., 1.])
+ >>> np.std(a, axis=1)
+ array([ 0.5, 0.5])
+
+ In single precision, std() can be inaccurate:
+
+ >>> a = np.zeros((2, 512*512), dtype=np.float32)
+ >>> a[0, :] = 1.0
+ >>> a[1, :] = 0.1
+ >>> np.std(a)
+ 0.45000005
+
+ Computing the standard deviation in float64 is more accurate:
+
+ >>> np.std(a, dtype=np.float64)
+ 0.44999999925494177
+
+ """
+ kwargs = {}
+ if keepdims is not np._NoValue:
+ kwargs['keepdims'] = keepdims
+
+ if type(a) is not mu.ndarray:
+ try:
+ std = a.std
+ except AttributeError:
+ pass
+ else:
+ return std(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs)
+
+ return _methods._std(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
+ **kwargs)
+
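+# Editor's sketch (not part of upstream NumPy): the ddof divisor described
+# in the Notes, checked by hand for the docstring's 2x2 example; the printed
+# value assumes float64 and the default scalar repr:
+#
+#     >>> a = np.array([[1, 2], [3, 4]])
+#     >>> np.std(a, ddof=1)        # sqrt(sum((x - 2.5)**2) / (4 - 1))
+#     1.2909944487358056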
+
+def _var_dispatcher(
+ a, axis=None, dtype=None, out=None, ddof=None, keepdims=None):
+ return (a, out)
+
+
+@array_function_dispatch(_var_dispatcher)
+def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
+ """
+ Compute the variance along the specified axis.
+
+ Returns the variance of the array elements, a measure of the spread of a
+ distribution. The variance is computed for the flattened array by
+ default, otherwise over the specified axis.
+
+ Parameters
+ ----------
+ a : array_like
+ Array containing numbers whose variance is desired. If `a` is not an
+ array, a conversion is attempted.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which the variance is computed. The default is to
+ compute the variance of the flattened array.
+
+ .. versionadded:: 1.7.0
+
+ If this is a tuple of ints, a variance is performed over multiple axes,
+ instead of a single axis or all the axes as before.
+ dtype : data-type, optional
+ Type to use in computing the variance. For arrays of integer type
+ the default is `float64`; for arrays of float types it is the same as
+ the array type.
+ out : ndarray, optional
+ Alternate output array in which to place the result. It must have
+ the same shape as the expected output, but the type is cast if
+ necessary.
+ ddof : int, optional
+ "Delta Degrees of Freedom": the divisor used in the calculation is
+ ``N - ddof``, where ``N`` represents the number of elements. By
+ default `ddof` is zero.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the input array.
+
+ If the default value is passed, then `keepdims` will not be
+ passed through to the `var` method of sub-classes of
+ `ndarray`; however, any non-default value will be. If the
+ sub-class's method does not implement `keepdims`, any
+ exceptions will be raised.
+
+ Returns
+ -------
+ variance : ndarray, see dtype parameter above
+ If ``out=None``, returns a new array containing the variance;
+ otherwise, a reference to the output array is returned.
+
+ See Also
+ --------
+ std, mean, nanmean, nanstd, nanvar
+ numpy.doc.ufuncs : Section "Output arguments"
+
+ Notes
+ -----
+ The variance is the average of the squared deviations from the mean,
+ i.e., ``var = mean(abs(x - x.mean())**2)``.
+
+ The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``.
+ If, however, `ddof` is specified, the divisor ``N - ddof`` is used
+ instead. In standard statistical practice, ``ddof=1`` provides an
+ unbiased estimator of the variance of a hypothetical infinite population.
+ ``ddof=0`` provides a maximum likelihood estimate of the variance for
+ normally distributed variables.
+
+ Note that for complex numbers, the absolute value is taken before
+ squaring, so that the result is always real and nonnegative.
+
+ For floating-point input, the variance is computed using the same
+ precision the input has. Depending on the input data, this can cause
+ the results to be inaccurate, especially for `float32` (see example
+ below). Specifying a higher-accuracy accumulator using the ``dtype``
+ keyword can alleviate this issue.
+
+ Examples
+ --------
+ >>> a = np.array([[1, 2], [3, 4]])
+ >>> np.var(a)
+ 1.25
+ >>> np.var(a, axis=0)
+ array([ 1., 1.])
+ >>> np.var(a, axis=1)
+ array([ 0.25, 0.25])
+
+ In single precision, var() can be inaccurate:
+
+ >>> a = np.zeros((2, 512*512), dtype=np.float32)
+ >>> a[0, :] = 1.0
+ >>> a[1, :] = 0.1
+ >>> np.var(a)
+ 0.20250003
+
+ Computing the variance in float64 is more accurate:
+
+ >>> np.var(a, dtype=np.float64)
+ 0.20249999932944759
+ >>> ((1-0.55)**2 + (0.1-0.55)**2)/2
+ 0.2025
+
+ """
+ kwargs = {}
+ if keepdims is not np._NoValue:
+ kwargs['keepdims'] = keepdims
+
+ if type(a) is not mu.ndarray:
+ try:
+ var = a.var
+ except AttributeError:
+ pass
+ else:
+ return var(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs)
+
+ return _methods._var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
+ **kwargs)
+
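+# Editor's sketch (not part of upstream NumPy): `var` is the square of `std`
+# under the same ddof; for the docstring's example, assuming float64:
+#
+#     >>> a = np.array([[1, 2], [3, 4]])
+#     >>> np.var(a, ddof=1)        # 5 / (4 - 1)
+#     1.6666666666666667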
+
+# Aliases of other functions. These have their own definitions only so that
+# they can have unique docstrings.
+
+@array_function_dispatch(_around_dispatcher)
+def round_(a, decimals=0, out=None):
+ """
+ Round an array to the given number of decimals.
+
+ See Also
+ --------
+ around : equivalent function; see for details.
+ """
+ return around(a, decimals=decimals, out=out)
+
+
+@array_function_dispatch(_prod_dispatcher, verify=False)
+def product(*args, **kwargs):
+ """
+ Return the product of array elements over a given axis.
+
+ See Also
+ --------
+ prod : equivalent function; see for details.
+ """
+ return prod(*args, **kwargs)
+
+
+@array_function_dispatch(_cumprod_dispatcher, verify=False)
+def cumproduct(*args, **kwargs):
+ """
+ Return the cumulative product over the given axis.
+
+ See Also
+ --------
+ cumprod : equivalent function; see for details.
+ """
+ return cumprod(*args, **kwargs)
+
+
+@array_function_dispatch(_any_dispatcher, verify=False)
+def sometrue(*args, **kwargs):
+ """
+ Check whether some values are true.
+
+ Refer to `any` for full documentation.
+
+ See Also
+ --------
+ any : equivalent function; see for details.
+ """
+ return any(*args, **kwargs)
+
+
+@array_function_dispatch(_all_dispatcher, verify=False)
+def alltrue(*args, **kwargs):
+ """
+ Check if all elements of input array are true.
+
+ See Also
+ --------
+ numpy.all : Equivalent function; see for details.
+ """
+ return all(*args, **kwargs)
+
+
+@array_function_dispatch(_ndim_dispatcher)
+def rank(a):
+ """
+ Return the number of dimensions of an array.
+
+ .. note::
+ This function is deprecated in NumPy 1.9 to avoid confusion with
+ `numpy.linalg.matrix_rank`. The ``ndim`` attribute or function
+ should be used instead.
+
+ See Also
+ --------
+ ndim : equivalent non-deprecated function
+
+ Notes
+ -----
+ In the old Numeric package, `rank` was the term used for the number of
+ dimensions, but in NumPy `ndim` is used instead.
+ """
+ # 2014-04-12, 1.9
+ warnings.warn(
+ "`rank` is deprecated; use the `ndim` attribute or function instead. "
+ "To find the rank of a matrix see `numpy.linalg.matrix_rank`.",
+ VisibleDeprecationWarning, stacklevel=2)
+ return ndim(a)
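+# Editor's note (not part of upstream NumPy): round_, product, cumproduct,
+# sometrue, alltrue and rank above are legacy aliases kept only so they can
+# carry their own docstrings; new code should call around, prod, cumprod,
+# any, all and ndim directly.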
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/fromnumeric.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/fromnumeric.pyc
new file mode 100644
index 0000000..88d2d5e
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/fromnumeric.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/function_base.py b/project/venv/lib/python2.7/site-packages/numpy/core/function_base.py
new file mode 100644
index 0000000..b68fd40
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/function_base.py
@@ -0,0 +1,462 @@
+from __future__ import division, absolute_import, print_function
+
+import functools
+import warnings
+import operator
+
+from . import numeric as _nx
+from .numeric import (result_type, NaN, shares_memory, MAY_SHARE_BOUNDS,
+ TooHardError, asanyarray)
+from numpy.core.multiarray import add_docstring
+from numpy.core import overrides
+
+__all__ = ['logspace', 'linspace', 'geomspace']
+
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
+def _index_deprecate(i, stacklevel=2):
+ try:
+ i = operator.index(i)
+ except TypeError:
+ msg = ("object of type {} cannot be safely interpreted as "
+ "an integer.".format(type(i)))
+ i = int(i)
+ stacklevel += 1
+ warnings.warn(msg, DeprecationWarning, stacklevel=stacklevel)
+ return i
+
+
+def _linspace_dispatcher(start, stop, num=None, endpoint=None, retstep=None,
+ dtype=None, axis=None):
+ return (start, stop)
+
+
+@array_function_dispatch(_linspace_dispatcher)
+def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None,
+ axis=0):
+ """
+ Return evenly spaced numbers over a specified interval.
+
+ Returns `num` evenly spaced samples, calculated over the
+ interval [`start`, `stop`].
+
+ The endpoint of the interval can optionally be excluded.
+
+ .. versionchanged:: 1.16.0
+ Non-scalar `start` and `stop` are now supported.
+
+ Parameters
+ ----------
+ start : array_like
+ The starting value of the sequence.
+ stop : array_like
+ The end value of the sequence, unless `endpoint` is set to False.
+ In that case, the sequence consists of all but the last of ``num + 1``
+ evenly spaced samples, so that `stop` is excluded. Note that the step
+ size changes when `endpoint` is False.
+ num : int, optional
+ Number of samples to generate. Default is 50. Must be non-negative.
+ endpoint : bool, optional
+ If True, `stop` is the last sample. Otherwise, it is not included.
+ Default is True.
+ retstep : bool, optional
+ If True, return (`samples`, `step`), where `step` is the spacing
+ between samples.
+ dtype : dtype, optional
+ The type of the output array. If `dtype` is not given, infer the data
+ type from the other input arguments.
+
+ .. versionadded:: 1.9.0
+
+ axis : int, optional
+ The axis in the result to store the samples. Relevant only if start
+ or stop are array-like. By default (0), the samples will be along a
+ new axis inserted at the beginning. Use -1 to get an axis at the end.
+
+ .. versionadded:: 1.16.0
+
+ Returns
+ -------
+ samples : ndarray
+ There are `num` equally spaced samples in the closed interval
+ ``[start, stop]`` or the half-open interval ``[start, stop)``
+ (depending on whether `endpoint` is True or False).
+ step : float, optional
+ Only returned if `retstep` is True.
+
+ Size of spacing between samples.
+
+
+ See Also
+ --------
+ arange : Similar to `linspace`, but uses a step size (instead of the
+ number of samples).
+ geomspace : Similar to `linspace`, but with numbers spaced evenly on a log
+ scale (a geometric progression).
+ logspace : Similar to `geomspace`, but with the end points specified as
+ logarithms.
+
+ Examples
+ --------
+ >>> np.linspace(2.0, 3.0, num=5)
+ array([ 2. , 2.25, 2.5 , 2.75, 3. ])
+ >>> np.linspace(2.0, 3.0, num=5, endpoint=False)
+ array([ 2. , 2.2, 2.4, 2.6, 2.8])
+ >>> np.linspace(2.0, 3.0, num=5, retstep=True)
+ (array([ 2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
+
+ Graphical illustration:
+
+ >>> import matplotlib.pyplot as plt
+ >>> N = 8
+ >>> y = np.zeros(N)
+ >>> x1 = np.linspace(0, 10, N, endpoint=True)
+ >>> x2 = np.linspace(0, 10, N, endpoint=False)
+ >>> plt.plot(x1, y, 'o')
+ [<matplotlib.lines.Line2D object at 0x...>]
+ >>> plt.plot(x2, y + 0.5, 'o')
+ [<matplotlib.lines.Line2D object at 0x...>]
+ >>> plt.ylim([-0.5, 1])
+ (-0.5, 1)
+ >>> plt.show()
+
+ """
+ # 2016-02-25, 1.12
+ num = _index_deprecate(num)
+ if num < 0:
+ raise ValueError("Number of samples, %s, must be non-negative." % num)
+ div = (num - 1) if endpoint else num
+
+ # Convert float/complex array scalars to float, gh-3504
+ # and make sure one can use variables that have an __array_interface__, gh-6634
+ start = asanyarray(start) * 1.0
+ stop = asanyarray(stop) * 1.0
+
+ dt = result_type(start, stop, float(num))
+ if dtype is None:
+ dtype = dt
+
+ delta = stop - start
+ y = _nx.arange(0, num, dtype=dt).reshape((-1,) + (1,) * delta.ndim)
+ # In-place multiplication y *= delta/div is faster, but prevents the multiplicand
+ # from overriding what class is produced, and thus prevents, e.g. use of Quantities,
+ # see gh-7142. Hence, we multiply in place only for standard scalar types.
+ _mult_inplace = _nx.isscalar(delta)
+ if num > 1:
+ step = delta / div
+ if _nx.any(step == 0):
+ # Special handling for denormal numbers, gh-5437
+ y /= div
+ if _mult_inplace:
+ y *= delta
+ else:
+ y = y * delta
+ else:
+ if _mult_inplace:
+ y *= step
+ else:
+ y = y * step
+ else:
+ # 0 and 1 item long sequences have an undefined step
+ step = NaN
+ # Multiply with delta to allow possible override of output class.
+ y = y * delta
+
+ y += start
+
+ if endpoint and num > 1:
+ y[-1] = stop
+
+ if axis != 0:
+ y = _nx.moveaxis(y, 0, axis)
+
+ if retstep:
+ return y.astype(dtype, copy=False), step
+ else:
+ return y.astype(dtype, copy=False)
+
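+# Editor's sketch (not part of upstream NumPy): the `div` logic above implies
+# step = (stop - start) / (num - 1) when endpoint=True, else / num. Assuming
+# standard NumPy semantics:
+#
+#     >>> np.linspace(0.0, 1.0, num=5, retstep=True)[1]    # (1 - 0) / (5 - 1)
+#     0.25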
+
+def _logspace_dispatcher(start, stop, num=None, endpoint=None, base=None,
+ dtype=None, axis=None):
+ return (start, stop)
+
+
+@array_function_dispatch(_logspace_dispatcher)
+def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None,
+ axis=0):
+ """
+ Return numbers spaced evenly on a log scale.
+
+ In linear space, the sequence starts at ``base ** start``
+ (`base` to the power of `start`) and ends with ``base ** stop``
+ (see `endpoint` below).
+
+ .. versionchanged:: 1.16.0
+ Non-scalar `start` and `stop` are now supported.
+
+ Parameters
+ ----------
+ start : array_like
+ ``base ** start`` is the starting value of the sequence.
+ stop : array_like
+ ``base ** stop`` is the final value of the sequence, unless `endpoint`
+ is False. In that case, ``num + 1`` values are spaced over the
+ interval in log-space, of which all but the last (a sequence of
+ length `num`) are returned.
+ num : integer, optional
+ Number of samples to generate. Default is 50.
+ endpoint : boolean, optional
+ If true, `stop` is the last sample. Otherwise, it is not included.
+ Default is True.
+ base : float, optional
+ The base of the log space. The step size between the elements in
+ ``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
+ Default is 10.0.
+ dtype : dtype
+ The type of the output array. If `dtype` is not given, infer the data
+ type from the other input arguments.
+ axis : int, optional
+ The axis in the result to store the samples. Relevant only if start
+ or stop are array-like. By default (0), the samples will be along a
+ new axis inserted at the beginning. Use -1 to get an axis at the end.
+
+ .. versionadded:: 1.16.0
+
+
+ Returns
+ -------
+ samples : ndarray
+ `num` samples, equally spaced on a log scale.
+
+ See Also
+ --------
+ arange : Similar to linspace, with the step size specified instead of the
+ number of samples. Note that, when used with a float endpoint, the
+ endpoint may or may not be included.
+ linspace : Similar to logspace, but with the samples uniformly distributed
+ in linear space, instead of log space.
+ geomspace : Similar to logspace, but with endpoints specified directly.
+
+ Notes
+ -----
+ Logspace is equivalent to the code
+
+ >>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
+ ... # doctest: +SKIP
+ >>> power(base, y).astype(dtype)
+ ... # doctest: +SKIP
+
+ Examples
+ --------
+ >>> np.logspace(2.0, 3.0, num=4)
+ array([ 100. , 215.443469 , 464.15888336, 1000. ])
+ >>> np.logspace(2.0, 3.0, num=4, endpoint=False)
+ array([ 100. , 177.827941 , 316.22776602, 562.34132519])
+ >>> np.logspace(2.0, 3.0, num=4, base=2.0)
+ array([ 4. , 5.0396842 , 6.34960421, 8. ])
+
+ Graphical illustration:
+
+ >>> import matplotlib.pyplot as plt
+ >>> N = 10
+ >>> x1 = np.logspace(0.1, 1, N, endpoint=True)
+ >>> x2 = np.logspace(0.1, 1, N, endpoint=False)
+ >>> y = np.zeros(N)
+ >>> plt.plot(x1, y, 'o')
+ [<matplotlib.lines.Line2D object at 0x...>]
+ >>> plt.plot(x2, y + 0.5, 'o')
+ [<matplotlib.lines.Line2D object at 0x...>]
+ >>> plt.ylim([-0.5, 1])
+ (-0.5, 1)
+ >>> plt.show()
+
+ """
+ y = linspace(start, stop, num=num, endpoint=endpoint, axis=axis)
+ if dtype is None:
+ return _nx.power(base, y)
+ return _nx.power(base, y).astype(dtype, copy=False)
+
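+# Editor's sketch (not part of upstream NumPy): a check of the equivalence
+# stated in the Notes, using allclose to sidestep rounding in the last digit:
+#
+#     >>> y = np.linspace(2.0, 3.0, num=4)
+#     >>> np.allclose(np.logspace(2.0, 3.0, num=4), np.power(10.0, y))
+#     True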
+
+def _geomspace_dispatcher(start, stop, num=None, endpoint=None, dtype=None,
+ axis=None):
+ return (start, stop)
+
+
+@array_function_dispatch(_geomspace_dispatcher)
+def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0):
+ """
+ Return numbers spaced evenly on a log scale (a geometric progression).
+
+ This is similar to `logspace`, but with endpoints specified directly.
+ Each output sample is a constant multiple of the previous.
+
+ .. versionchanged:: 1.16.0
+ Non-scalar `start` and `stop` are now supported.
+
+ Parameters
+ ----------
+ start : array_like
+ The starting value of the sequence.
+ stop : array_like
+ The final value of the sequence, unless `endpoint` is False.
+ In that case, ``num + 1`` values are spaced over the
+ interval in log-space, of which all but the last (a sequence of
+ length `num`) are returned.
+ num : integer, optional
+ Number of samples to generate. Default is 50.
+ endpoint : boolean, optional
+ If true, `stop` is the last sample. Otherwise, it is not included.
+ Default is True.
+ dtype : dtype
+ The type of the output array. If `dtype` is not given, infer the data
+ type from the other input arguments.
+ axis : int, optional
+ The axis in the result to store the samples. Relevant only if start
+ or stop are array-like. By default (0), the samples will be along a
+ new axis inserted at the beginning. Use -1 to get an axis at the end.
+
+ .. versionadded:: 1.16.0
+
+ Returns
+ -------
+ samples : ndarray
+ `num` samples, equally spaced on a log scale.
+
+ See Also
+ --------
+ logspace : Similar to geomspace, but with endpoints specified using log
+ and base.
+ linspace : Similar to geomspace, but with arithmetic instead of geometric
+ progression.
+ arange : Similar to linspace, with the step size specified instead of the
+ number of samples.
+
+ Notes
+ -----
+ If the inputs or dtype are complex, the output will follow a logarithmic
+ spiral in the complex plane. (There are an infinite number of spirals
+ passing through two points; the output will follow the shortest such path.)
+
+ Examples
+ --------
+ >>> np.geomspace(1, 1000, num=4)
+ array([ 1., 10., 100., 1000.])
+ >>> np.geomspace(1, 1000, num=3, endpoint=False)
+ array([ 1., 10., 100.])
+ >>> np.geomspace(1, 1000, num=4, endpoint=False)
+ array([ 1. , 5.62341325, 31.6227766 , 177.827941 ])
+ >>> np.geomspace(1, 256, num=9)
+ array([ 1., 2., 4., 8., 16., 32., 64., 128., 256.])
+
+ Note that the above may not produce exact integers:
+
+ >>> np.geomspace(1, 256, num=9, dtype=int)
+ array([ 1, 2, 4, 7, 16, 32, 63, 127, 256])
+ >>> np.around(np.geomspace(1, 256, num=9)).astype(int)
+ array([ 1, 2, 4, 8, 16, 32, 64, 128, 256])
+
+ Negative, decreasing, and complex inputs are allowed:
+
+ >>> np.geomspace(1000, 1, num=4)
+ array([ 1000., 100., 10., 1.])
+ >>> np.geomspace(-1000, -1, num=4)
+ array([-1000., -100., -10., -1.])
+ >>> np.geomspace(1j, 1000j, num=4) # Straight line
+ array([ 0. +1.j, 0. +10.j, 0. +100.j, 0.+1000.j])
+ >>> np.geomspace(-1+0j, 1+0j, num=5) # Circle
+ array([-1.00000000+0.j , -0.70710678+0.70710678j,
+ 0.00000000+1.j , 0.70710678+0.70710678j,
+ 1.00000000+0.j ])
+
+ Graphical illustration of ``endpoint`` parameter:
+
+ >>> import matplotlib.pyplot as plt
+ >>> N = 10
+ >>> y = np.zeros(N)
+ >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=True), y + 1, 'o')
+ >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=False), y + 2, 'o')
+ >>> plt.axis([0.5, 2000, 0, 3])
+ >>> plt.grid(True, color='0.7', linestyle='-', which='both', axis='both')
+ >>> plt.show()
+
+ """
+ start = asanyarray(start)
+ stop = asanyarray(stop)
+ if _nx.any(start == 0) or _nx.any(stop == 0):
+ raise ValueError('Geometric sequence cannot include zero')
+
+ dt = result_type(start, stop, float(num), _nx.zeros((), dtype))
+ if dtype is None:
+ dtype = dt
+ else:
+ # complex to dtype('complex128'), for instance
+ dtype = _nx.dtype(dtype)
+
+ # Promote both arguments to the same dtype in case, for instance, one is
+ # complex and another is negative and log would produce NaN otherwise.
+ # Copy since we may change things in-place further down.
+ start = start.astype(dt, copy=True)
+ stop = stop.astype(dt, copy=True)
+
+ out_sign = _nx.ones(_nx.broadcast(start, stop).shape, dt)
+ # Avoid negligible real or imaginary parts in output by rotating to
+ # positive real, calculating, then undoing rotation
+ if _nx.issubdtype(dt, _nx.complexfloating):
+ all_imag = (start.real == 0.) & (stop.real == 0.)
+ if _nx.any(all_imag):
+ start[all_imag] = start[all_imag].imag
+ stop[all_imag] = stop[all_imag].imag
+ out_sign[all_imag] = 1j
+
+ both_negative = (_nx.sign(start) == -1) & (_nx.sign(stop) == -1)
+ if _nx.any(both_negative):
+ _nx.negative(start, out=start, where=both_negative)
+ _nx.negative(stop, out=stop, where=both_negative)
+ _nx.negative(out_sign, out=out_sign, where=both_negative)
+
+ log_start = _nx.log10(start)
+ log_stop = _nx.log10(stop)
+ result = out_sign * logspace(log_start, log_stop, num=num,
+ endpoint=endpoint, base=10.0, dtype=dtype)
+ if axis != 0:
+ result = _nx.moveaxis(result, 0, axis)
+
+ return result.astype(dtype, copy=False)
+
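+# Editor's sketch (not part of upstream NumPy): with endpoint=True, each
+# geomspace sample is the previous one times (stop/start)**(1/(num-1)); zero
+# is rejected above because its logarithm is undefined. Assuming standard
+# NumPy semantics:
+#
+#     >>> np.allclose(np.geomspace(2.0, 32.0, num=5), 2.0 * 2.0 ** np.arange(5))
+#     True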
+
+# always succeeds
+def add_newdoc(place, obj, doc):
+ """
+ Add documentation to `obj`, which lives in module `place`.
+
+ If `doc` is a string, add it to `obj` as a docstring.
+
+ If `doc` is a tuple, the first element is interpreted as
+ an attribute of `obj` and the second as the docstring:
+ (method, docstring).
+
+ If `doc` is a list, each element should be a sequence of
+ length two: [(method1, docstring1), (method2, docstring2), ...]
+
+ This routine never raises an error.
+
+ This routine cannot modify read-only docstrings, as appear
+ in new-style classes or built-in functions. Because this
+ routine never raises an error the caller must check manually
+ that the docstrings were changed.
+ """
+ try:
+ new = getattr(__import__(place, globals(), {}, [obj]), obj)
+ if isinstance(doc, str):
+ add_docstring(new, doc.strip())
+ elif isinstance(doc, tuple):
+ add_docstring(getattr(new, doc[0]), doc[1].strip())
+ elif isinstance(doc, list):
+ for val in doc:
+ add_docstring(getattr(new, val[0]), val[1].strip())
+ except Exception:
+ pass
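+# Editor's note (not part of upstream NumPy): failures are swallowed
+# deliberately; docstrings of built-ins and new-style classes are read-only,
+# so, as the docstring above says, callers must verify for themselves that
+# the docstring actually changed.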
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/function_base.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/function_base.pyc
new file mode 100644
index 0000000..7fa35ae
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/function_base.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/generate_numpy_api.py b/project/venv/lib/python2.7/site-packages/numpy/core/generate_numpy_api.py
new file mode 100644
index 0000000..5e04fb8
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/generate_numpy_api.py
@@ -0,0 +1,254 @@
+from __future__ import division, print_function
+
+import os
+import genapi
+
+from genapi import \
+ TypeApi, GlobalVarApi, FunctionApi, BoolValuesApi
+
+import numpy_api
+
+# use annotated api when running under cpychecker
+h_template = r"""
+#if defined(_MULTIARRAYMODULE) || defined(WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE)
+
+typedef struct {
+ PyObject_HEAD
+ npy_bool obval;
+} PyBoolScalarObject;
+
+extern NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type;
+extern NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type;
+extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];
+
+%s
+
+#else
+
+#if defined(PY_ARRAY_UNIQUE_SYMBOL)
+#define PyArray_API PY_ARRAY_UNIQUE_SYMBOL
+#endif
+
+#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY)
+extern void **PyArray_API;
+#else
+#if defined(PY_ARRAY_UNIQUE_SYMBOL)
+void **PyArray_API;
+#else
+static void **PyArray_API=NULL;
+#endif
+#endif
+
+%s
+
+#if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT)
+static int
+_import_array(void)
+{
+ int st;
+ PyObject *numpy = PyImport_ImportModule("numpy.core._multiarray_umath");
+ PyObject *c_api = NULL;
+
+ if (numpy == NULL) {
+ return -1;
+ }
+ c_api = PyObject_GetAttrString(numpy, "_ARRAY_API");
+ Py_DECREF(numpy);
+ if (c_api == NULL) {
+ PyErr_SetString(PyExc_AttributeError, "_ARRAY_API not found");
+ return -1;
+ }
+
+#if PY_VERSION_HEX >= 0x03000000
+ if (!PyCapsule_CheckExact(c_api)) {
+ PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCapsule object");
+ Py_DECREF(c_api);
+ return -1;
+ }
+ PyArray_API = (void **)PyCapsule_GetPointer(c_api, NULL);
+#else
+ if (!PyCObject_Check(c_api)) {
+ PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCObject object");
+ Py_DECREF(c_api);
+ return -1;
+ }
+ PyArray_API = (void **)PyCObject_AsVoidPtr(c_api);
+#endif
+ Py_DECREF(c_api);
+ if (PyArray_API == NULL) {
+ PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is NULL pointer");
+ return -1;
+ }
+
+ /* Perform runtime check of C API version */
+ if (NPY_VERSION != PyArray_GetNDArrayCVersion()) {
+ PyErr_Format(PyExc_RuntimeError, "module compiled against "\
+ "ABI version 0x%%x but this version of numpy is 0x%%x", \
+ (int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion());
+ return -1;
+ }
+ if (NPY_FEATURE_VERSION > PyArray_GetNDArrayCFeatureVersion()) {
+ PyErr_Format(PyExc_RuntimeError, "module compiled against "\
+ "API version 0x%%x but this version of numpy is 0x%%x", \
+ (int) NPY_FEATURE_VERSION, (int) PyArray_GetNDArrayCFeatureVersion());
+ return -1;
+ }
+
+ /*
+ * Perform runtime check of endianness and check it matches the one set by
+ * the headers (npy_endian.h) as a safeguard
+ */
+ st = PyArray_GetEndianness();
+ if (st == NPY_CPU_UNKNOWN_ENDIAN) {
+ PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as unknown endian");
+ return -1;
+ }
+#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN
+ if (st != NPY_CPU_BIG) {
+ PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\
+ "big endian, but detected different endianness at runtime");
+ return -1;
+ }
+#elif NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN
+ if (st != NPY_CPU_LITTLE) {
+ PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\
+ "little endian, but detected different endianness at runtime");
+ return -1;
+ }
+#endif
+
+ return 0;
+}
+
+#if PY_VERSION_HEX >= 0x03000000
+#define NUMPY_IMPORT_ARRAY_RETVAL NULL
+#else
+#define NUMPY_IMPORT_ARRAY_RETVAL
+#endif
+
+#define import_array() {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return NUMPY_IMPORT_ARRAY_RETVAL; } }
+
+#define import_array1(ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return ret; } }
+
+#define import_array2(msg, ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, msg); return ret; } }
+
+#endif
+
+#endif
+"""
+
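+# Editor's note (not part of upstream NumPy): the template above is why every
+# C extension using the NumPy C API must call import_array() in its module
+# init function; it fetches the _ARRAY_API capsule and fills in the
+# PyArray_API function-pointer table, and the version and endianness checks
+# guard against ABI mismatches at import time.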
+
+c_template = r"""
+/* These pointers will be stored in the C-object for use in other
+ extension modules
+*/
+
+void *PyArray_API[] = {
+%s
+};
+"""
+
+c_api_header = """
+===========
+NumPy C-API
+===========
+"""
+
+def generate_api(output_dir, force=False):
+ basename = 'multiarray_api'
+
+ h_file = os.path.join(output_dir, '__%s.h' % basename)
+ c_file = os.path.join(output_dir, '__%s.c' % basename)
+ d_file = os.path.join(output_dir, '%s.txt' % basename)
+ targets = (h_file, c_file, d_file)
+
+ sources = numpy_api.multiarray_api
+
+ if (not force and not genapi.should_rebuild(targets, [numpy_api.__file__, __file__])):
+ return targets
+ else:
+ do_generate_api(targets, sources)
+
+ return targets
+
+def do_generate_api(targets, sources):
+ header_file = targets[0]
+ c_file = targets[1]
+ doc_file = targets[2]
+
+ global_vars = sources[0]
+ scalar_bool_values = sources[1]
+ types_api = sources[2]
+ multiarray_funcs = sources[3]
+
+ multiarray_api = sources[:]
+
+ module_list = []
+ extension_list = []
+ init_list = []
+
+ # Check multiarray api indexes
+ multiarray_api_index = genapi.merge_api_dicts(multiarray_api)
+ genapi.check_api_dict(multiarray_api_index)
+
+ numpyapi_list = genapi.get_api_functions('NUMPY_API',
+ multiarray_funcs)
+
+ # FIXME: ordered_funcs_api is unused
+ ordered_funcs_api = genapi.order_dict(multiarray_funcs)
+
+ # Create dict name -> *Api instance
+ api_name = 'PyArray_API'
+ multiarray_api_dict = {}
+ for f in numpyapi_list:
+ name = f.name
+ index = multiarray_funcs[name][0]
+ annotations = multiarray_funcs[name][1:]
+ multiarray_api_dict[f.name] = FunctionApi(f.name, index, annotations,
+ f.return_type,
+ f.args, api_name)
+
+ for name, val in global_vars.items():
+ index, type = val
+ multiarray_api_dict[name] = GlobalVarApi(name, index, type, api_name)
+
+ for name, val in scalar_bool_values.items():
+ index = val[0]
+ multiarray_api_dict[name] = BoolValuesApi(name, index, api_name)
+
+ for name, val in types_api.items():
+ index = val[0]
+ multiarray_api_dict[name] = TypeApi(name, index, 'PyTypeObject', api_name)
+
+ if len(multiarray_api_dict) != len(multiarray_api_index):
+ keys_dict = set(multiarray_api_dict.keys())
+ keys_index = set(multiarray_api_index.keys())
+ raise AssertionError(
+ "Multiarray API size mismatch - "
+ "index has extra keys {}, dict has extra keys {}"
+ .format(keys_index - keys_dict, keys_dict - keys_index)
+ )
+
+ extension_list = []
+ for name, index in genapi.order_dict(multiarray_api_index):
+ api_item = multiarray_api_dict[name]
+ extension_list.append(api_item.define_from_array_api_string())
+ init_list.append(api_item.array_api_define())
+ module_list.append(api_item.internal_define())
+
+ # Write to header
+ s = h_template % ('\n'.join(module_list), '\n'.join(extension_list))
+ genapi.write_file(header_file, s)
+
+ # Write to c-code
+ s = c_template % ',\n'.join(init_list)
+ genapi.write_file(c_file, s)
+
+ # write to documentation
+ s = c_api_header
+ for func in numpyapi_list:
+ s += func.to_ReST()
+ s += '\n\n'
+ genapi.write_file(doc_file, s)
+
+ return targets
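+# Editor's note (not part of upstream NumPy): with basename 'multiarray_api'
+# above, generate_api(output_dir) writes __multiarray_api.h,
+# __multiarray_api.c and multiarray_api.txt into output_dir, regenerating
+# only when genapi.should_rebuild reports the sources changed (or force=True).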
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/generate_numpy_api.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/generate_numpy_api.pyc
new file mode 100644
index 0000000..8a03d2b
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/generate_numpy_api.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/getlimits.py b/project/venv/lib/python2.7/site-packages/numpy/core/getlimits.py
new file mode 100644
index 0000000..544b8b3
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/getlimits.py
@@ -0,0 +1,550 @@
+"""Machine limits for Float32 and Float64 and (long double) if available...
+
+"""
+from __future__ import division, absolute_import, print_function
+
+__all__ = ['finfo', 'iinfo']
+
+import warnings
+
+from .machar import MachAr
+from .overrides import set_module
+from . import numeric
+from . import numerictypes as ntypes
+from .numeric import array, inf
+from .umath import log10, exp2
+from . import umath
+
+
+def _fr0(a):
+ """fix rank-0 --> rank-1"""
+ if a.ndim == 0:
+ a = a.copy()
+ a.shape = (1,)
+ return a
+
+
+def _fr1(a):
+ """fix rank > 0 --> rank-0"""
+ if a.size == 1:
+ a = a.copy()
+ a.shape = ()
+ return a
+
+class MachArLike(object):
+ """ Object to simulate MachAr instance """
+
+ def __init__(self,
+ ftype,
+ **kwargs):
+ params = _MACHAR_PARAMS[ftype]
+ float_conv = lambda v: array([v], ftype)
+ float_to_float = lambda v : _fr1(float_conv(v))
+ float_to_str = lambda v: (params['fmt'] % array(_fr0(v)[0], ftype))
+
+ self.title = params['title']
+ # Parameter types same as for discovered MachAr object.
+ self.epsilon = self.eps = float_to_float(kwargs.pop('eps'))
+ self.epsneg = float_to_float(kwargs.pop('epsneg'))
+ self.xmax = self.huge = float_to_float(kwargs.pop('huge'))
+ self.xmin = self.tiny = float_to_float(kwargs.pop('tiny'))
+ self.ibeta = params['itype'](kwargs.pop('ibeta'))
+ self.__dict__.update(kwargs)
+ self.precision = int(-log10(self.eps))
+ self.resolution = float_to_float(float_conv(10) ** (-self.precision))
+ self._str_eps = float_to_str(self.eps)
+ self._str_epsneg = float_to_str(self.epsneg)
+ self._str_xmin = float_to_str(self.xmin)
+ self._str_xmax = float_to_str(self.xmax)
+ self._str_resolution = float_to_str(self.resolution)
+
+_convert_to_float = {
+ ntypes.csingle: ntypes.single,
+ ntypes.complex_: ntypes.float_,
+ ntypes.clongfloat: ntypes.longfloat
+ }
+
+# Parameters for creating MachAr / MachAr-like objects
+_title_fmt = 'numpy {} precision floating point number'
+_MACHAR_PARAMS = {
+ ntypes.double: dict(
+ itype = ntypes.int64,
+ fmt = '%24.16e',
+ title = _title_fmt.format('double')),
+ ntypes.single: dict(
+ itype = ntypes.int32,
+ fmt = '%15.7e',
+ title = _title_fmt.format('single')),
+ ntypes.longdouble: dict(
+ itype = ntypes.longlong,
+ fmt = '%s',
+ title = _title_fmt.format('long double')),
+ ntypes.half: dict(
+ itype = ntypes.int16,
+ fmt = '%12.5e',
+ title = _title_fmt.format('half'))}
+
+# Key to identify the floating point type. Key is result of
+# ftype('-0.1').newbyteorder('<').tobytes()
+# See:
+# https://perl5.git.perl.org/perl.git/blob/3118d7d684b56cbeb702af874f4326683c45f045:/Configure
+_KNOWN_TYPES = {}
+def _register_type(machar, bytepat):
+ _KNOWN_TYPES[bytepat] = machar
+_float_ma = {}
+
+def _register_known_types():
+ # Known parameters for float16
+ # See docstring of MachAr class for description of parameters.
+ f16 = ntypes.float16
+ float16_ma = MachArLike(f16,
+ machep=-10,
+ negep=-11,
+ minexp=-14,
+ maxexp=16,
+ it=10,
+ iexp=5,
+ ibeta=2,
+ irnd=5,
+ ngrd=0,
+ eps=exp2(f16(-10)),
+ epsneg=exp2(f16(-11)),
+ huge=f16(65504),
+ tiny=f16(2 ** -14))
+ _register_type(float16_ma, b'f\xae')
+ _float_ma[16] = float16_ma
+
+ # Known parameters for float32
+ f32 = ntypes.float32
+ float32_ma = MachArLike(f32,
+ machep=-23,
+ negep=-24,
+ minexp=-126,
+ maxexp=128,
+ it=23,
+ iexp=8,
+ ibeta=2,
+ irnd=5,
+ ngrd=0,
+ eps=exp2(f32(-23)),
+ epsneg=exp2(f32(-24)),
+ huge=f32((1 - 2 ** -24) * 2**128),
+ tiny=exp2(f32(-126)))
+ _register_type(float32_ma, b'\xcd\xcc\xcc\xbd')
+ _float_ma[32] = float32_ma
+
+ # Known parameters for float64
+ f64 = ntypes.float64
+ epsneg_f64 = 2.0 ** -53.0
+ tiny_f64 = 2.0 ** -1022.0
+ float64_ma = MachArLike(f64,
+ machep=-52,
+ negep=-53,
+ minexp=-1022,
+ maxexp=1024,
+ it=52,
+ iexp=11,
+ ibeta=2,
+ irnd=5,
+ ngrd=0,
+ eps=2.0 ** -52.0,
+ epsneg=epsneg_f64,
+ huge=(1.0 - epsneg_f64) / tiny_f64 * f64(4),
+ tiny=tiny_f64)
+ _register_type(float64_ma, b'\x9a\x99\x99\x99\x99\x99\xb9\xbf')
+ _float_ma[64] = float64_ma
+
+ # Known parameters for IEEE 754 128-bit binary float
+ ld = ntypes.longdouble
+ epsneg_f128 = exp2(ld(-113))
+ tiny_f128 = exp2(ld(-16382))
+ # Ignore runtime error when this is not f128
+ with numeric.errstate(all='ignore'):
+ huge_f128 = (ld(1) - epsneg_f128) / tiny_f128 * ld(4)
+ float128_ma = MachArLike(ld,
+ machep=-112,
+ negep=-113,
+ minexp=-16382,
+ maxexp=16384,
+ it=112,
+ iexp=15,
+ ibeta=2,
+ irnd=5,
+ ngrd=0,
+ eps=exp2(ld(-112)),
+ epsneg=epsneg_f128,
+ huge=huge_f128,
+ tiny=tiny_f128)
+ # IEEE 754 128-bit binary float
+ _register_type(float128_ma,
+ b'\x9a\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\xfb\xbf')
+ _float_ma[128] = float128_ma
+
+ # Known parameters for float80 (Intel 80-bit extended precision)
+ epsneg_f80 = exp2(ld(-64))
+ tiny_f80 = exp2(ld(-16382))
+ # Ignore runtime error when this is not f80
+ with numeric.errstate(all='ignore'):
+ huge_f80 = (ld(1) - epsneg_f80) / tiny_f80 * ld(4)
+ float80_ma = MachArLike(ld,
+ machep=-63,
+ negep=-64,
+ minexp=-16382,
+ maxexp=16384,
+ it=63,
+ iexp=15,
+ ibeta=2,
+ irnd=5,
+ ngrd=0,
+ eps=exp2(ld(-63)),
+ epsneg=epsneg_f80,
+ huge=huge_f80,
+ tiny=tiny_f80)
+ # float80, first 10 bytes containing actual storage
+ _register_type(float80_ma, b'\xcd\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xfb\xbf')
+ _float_ma[80] = float80_ma
+
+ # Guessed / known parameters for double double; see:
+ # https://en.wikipedia.org/wiki/Quadruple-precision_floating-point_format#Double-double_arithmetic
+ # These numbers have the same exponent range as float64, but extended number of
+ # digits in the significand.
+ huge_dd = (umath.nextafter(ld(inf), ld(0))
+ if hasattr(umath, 'nextafter') # Missing on some platforms?
+ else float64_ma.huge)
+ float_dd_ma = MachArLike(ld,
+ machep=-105,
+ negep=-106,
+ minexp=-1022,
+ maxexp=1024,
+ it=105,
+ iexp=11,
+ ibeta=2,
+ irnd=5,
+ ngrd=0,
+ eps=exp2(ld(-105)),
+ epsneg= exp2(ld(-106)),
+ huge=huge_dd,
+ tiny=exp2(ld(-1022)))
+ # double double; low, high order (e.g. PPC 64)
+ _register_type(float_dd_ma,
+ b'\x9a\x99\x99\x99\x99\x99Y<\x9a\x99\x99\x99\x99\x99\xb9\xbf')
+ # double double; high, low order (e.g. PPC 64 le)
+ _register_type(float_dd_ma,
+ b'\x9a\x99\x99\x99\x99\x99\xb9\xbf\x9a\x99\x99\x99\x99\x99Y<')
+ _float_ma['dd'] = float_dd_ma
+
+
+def _get_machar(ftype):
+ """ Get MachAr instance or MachAr-like instance
+
+ Get parameters for floating point type, by first trying signatures of
+ various known floating point types, then, if none match, attempting to
+ identify parameters by analysis.
+
+ Parameters
+ ----------
+ ftype : class
+ Numpy floating point type class (e.g. ``np.float64``)
+
+ Returns
+ -------
+ ma_like : instance of :class:`MachAr` or :class:`MachArLike`
+ Object giving floating point parameters for `ftype`.
+
+ Warns
+ -----
+ UserWarning
+ If the binary signature of the float type is not in the dictionary of
+ known float types.
+ """
+ params = _MACHAR_PARAMS.get(ftype)
+ if params is None:
+ raise ValueError(repr(ftype))
+ # Detect known / suspected types
+ key = ftype('-0.1').newbyteorder('<').tobytes()
+ ma_like = _KNOWN_TYPES.get(key)
+ # Could be 80 bit == 10 byte extended precision, where last bytes can be
+ # random garbage. Try comparing first 10 bytes to pattern.
+ if ma_like is None and ftype == ntypes.longdouble:
+ ma_like = _KNOWN_TYPES.get(key[:10])
+ if ma_like is not None:
+ return ma_like
+ # Fall back to parameter discovery
+ warnings.warn(
+ 'Signature {} for {} does not match any known type: '
+ 'falling back to type probe function'.format(key, ftype),
+ UserWarning, stacklevel=2)
+ return _discovered_machar(ftype)
+
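+# Editor's note (not part of upstream NumPy): the lookup key above is the
+# little-endian byte pattern of the value -0.1 in the candidate type; it is a
+# cheap fingerprint that distinguishes float64, float80, float128 and
+# double-double layouts without running the expensive MachAr probe in the
+# common cases.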
+
+def _discovered_machar(ftype):
+ """ Create MachAr instance with found information on float types
+ """
+ params = _MACHAR_PARAMS[ftype]
+ return MachAr(lambda v: array([v], ftype),
+ lambda v:_fr0(v.astype(params['itype']))[0],
+ lambda v:array(_fr0(v)[0], ftype),
+ lambda v: params['fmt'] % array(_fr0(v)[0], ftype),
+ params['title'])
+
+
+@set_module('numpy')
+class finfo(object):
+ """
+ finfo(dtype)
+
+ Machine limits for floating point types.
+
+ Attributes
+ ----------
+ bits : int
+ The number of bits occupied by the type.
+ eps : float
+ The smallest representable positive number such that
+ ``1.0 + eps != 1.0``. Type of `eps` is an appropriate floating
+ point type.
+ epsneg : floating point number of the appropriate type
+ The smallest representable positive number such that
+ ``1.0 - epsneg != 1.0``.
+ iexp : int
+ The number of bits in the exponent portion of the floating point
+ representation.
+ machar : MachAr
+ The object which calculated these parameters and holds more
+ detailed information.
+ machep : int
+ The exponent that yields `eps`.
+ max : floating point number of the appropriate type
+ The largest representable number.
+ maxexp : int
+ The smallest positive power of the base (2) that causes overflow.
+ min : floating point number of the appropriate type
+ The smallest representable number, typically ``-max``.
+ minexp : int
+ The most negative power of the base (2) consistent with there
+ being no leading 0's in the mantissa.
+ negep : int
+ The exponent that yields `epsneg`.
+ nexp : int
+ The number of bits in the exponent including its sign and bias.
+ nmant : int
+ The number of bits in the mantissa.
+ precision : int
+ The approximate number of decimal digits to which this kind of
+ float is precise.
+ resolution : floating point number of the appropriate type
+ The approximate decimal resolution of this type, i.e.,
+ ``10**-precision``.
+ tiny : float
+ The smallest positive usable number. Type of `tiny` is an
+ appropriate floating point type.
+
+ Parameters
+ ----------
+ dtype : float, dtype, or instance
+ Kind of floating point data-type about which to get information.
+
+ See Also
+ --------
+ MachAr : The implementation of the tests that produce this information.
+ iinfo : The equivalent for integer data types.
+
+ Notes
+ -----
+ For developers of NumPy: do not instantiate this at the module level.
+ The initial calculation of these parameters is expensive and negatively
+ impacts import times. These objects are cached, so calling ``finfo()``
+ repeatedly inside your functions is not a problem.
+
+ """
+
+ _finfo_cache = {}
+
+ def __new__(cls, dtype):
+ try:
+ dtype = numeric.dtype(dtype)
+ except TypeError:
+ # In case a float instance was given
+ dtype = numeric.dtype(type(dtype))
+
+ obj = cls._finfo_cache.get(dtype, None)
+ if obj is not None:
+ return obj
+ dtypes = [dtype]
+ newdtype = numeric.obj2sctype(dtype)
+ if newdtype is not dtype:
+ dtypes.append(newdtype)
+ dtype = newdtype
+ if not issubclass(dtype, numeric.inexact):
+ raise ValueError("data type %r not inexact" % (dtype))
+ obj = cls._finfo_cache.get(dtype, None)
+ if obj is not None:
+ return obj
+ if not issubclass(dtype, numeric.floating):
+ newdtype = _convert_to_float[dtype]
+ if newdtype is not dtype:
+ dtypes.append(newdtype)
+ dtype = newdtype
+ obj = cls._finfo_cache.get(dtype, None)
+ if obj is not None:
+ return obj
+ obj = object.__new__(cls)._init(dtype)
+ for dt in dtypes:
+ cls._finfo_cache[dt] = obj
+ return obj
+
+ def _init(self, dtype):
+ self.dtype = numeric.dtype(dtype)
+ machar = _get_machar(dtype)
+
+ for word in ['precision', 'iexp',
+ 'maxexp', 'minexp', 'negep',
+ 'machep']:
+ setattr(self, word, getattr(machar, word))
+ for word in ['tiny', 'resolution', 'epsneg']:
+ setattr(self, word, getattr(machar, word).flat[0])
+ self.bits = self.dtype.itemsize * 8
+ self.max = machar.huge.flat[0]
+ self.min = -self.max
+ self.eps = machar.eps.flat[0]
+ self.nexp = machar.iexp
+ self.nmant = machar.it
+ self.machar = machar
+ self._str_tiny = machar._str_xmin.strip()
+ self._str_max = machar._str_xmax.strip()
+ self._str_epsneg = machar._str_epsneg.strip()
+ self._str_eps = machar._str_eps.strip()
+ self._str_resolution = machar._str_resolution.strip()
+ return self
+
+ def __str__(self):
+ fmt = (
+ 'Machine parameters for %(dtype)s\n'
+ '---------------------------------------------------------------\n'
+ 'precision = %(precision)3s resolution = %(_str_resolution)s\n'
+ 'machep = %(machep)6s eps = %(_str_eps)s\n'
+ 'negep = %(negep)6s epsneg = %(_str_epsneg)s\n'
+ 'minexp = %(minexp)6s tiny = %(_str_tiny)s\n'
+ 'maxexp = %(maxexp)6s max = %(_str_max)s\n'
+ 'nexp = %(nexp)6s min = -max\n'
+ '---------------------------------------------------------------\n'
+ )
+ return fmt % self.__dict__
+
+ def __repr__(self):
+ c = self.__class__.__name__
+ d = self.__dict__.copy()
+ d['klass'] = c
+ return (("%(klass)s(resolution=%(resolution)s, min=-%(_str_max)s,"
+ " max=%(_str_max)s, dtype=%(dtype)s)") % d)
+
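+# Editor's sketch (not part of upstream NumPy): typical finfo usage, with
+# values assuming IEEE-754 float64:
+#
+#     >>> np.finfo(np.float64).eps        # 2**-52
+#     2.220446049250313e-16
+#     >>> np.finfo(np.float64).max
+#     1.7976931348623157e+308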
+
+@set_module('numpy')
+class iinfo(object):
+ """
+ iinfo(type)
+
+ Machine limits for integer types.
+
+ Attributes
+ ----------
+ bits : int
+ The number of bits occupied by the type.
+ min : int
+ The smallest integer expressible by the type.
+ max : int
+ The largest integer expressible by the type.
+
+ Parameters
+ ----------
+ int_type : integer type, dtype, or instance
+ The kind of integer data type to get information about.
+
+ See Also
+ --------
+ finfo : The equivalent for floating point data types.
+
+ Examples
+ --------
+ With types:
+
+ >>> ii16 = np.iinfo(np.int16)
+ >>> ii16.min
+ -32768
+ >>> ii16.max
+ 32767
+ >>> ii32 = np.iinfo(np.int32)
+ >>> ii32.min
+ -2147483648
+ >>> ii32.max
+ 2147483647
+
+ With instances:
+
+ >>> ii32 = np.iinfo(np.int32(10))
+ >>> ii32.min
+ -2147483648
+ >>> ii32.max
+ 2147483647
+
+ """
+
+ _min_vals = {}
+ _max_vals = {}
+
+ def __init__(self, int_type):
+ try:
+ self.dtype = numeric.dtype(int_type)
+ except TypeError:
+ self.dtype = numeric.dtype(type(int_type))
+ self.kind = self.dtype.kind
+ self.bits = self.dtype.itemsize * 8
+ self.key = "%s%d" % (self.kind, self.bits)
+ if self.kind not in 'iu':
+ raise ValueError("Invalid integer data type %r." % (self.kind,))
+
+ def min(self):
+ """Minimum value of given dtype."""
+ if self.kind == 'u':
+ return 0
+ else:
+ try:
+ val = iinfo._min_vals[self.key]
+ except KeyError:
+ val = int(-(1 << (self.bits-1)))
+ iinfo._min_vals[self.key] = val
+ return val
+
+ min = property(min)
+
+ def max(self):
+ """Maximum value of given dtype."""
+ try:
+ val = iinfo._max_vals[self.key]
+ except KeyError:
+ if self.kind == 'u':
+ val = int((1 << self.bits) - 1)
+ else:
+ val = int((1 << (self.bits-1)) - 1)
+ iinfo._max_vals[self.key] = val
+ return val
+
+ max = property(max)
+
+ def __str__(self):
+ """String representation."""
+ fmt = (
+ 'Machine parameters for %(dtype)s\n'
+ '---------------------------------------------------------------\n'
+ 'min = %(min)s\n'
+ 'max = %(max)s\n'
+ '---------------------------------------------------------------\n'
+ )
+ return fmt % {'dtype': self.dtype, 'min': self.min, 'max': self.max}
+
+ def __repr__(self):
+ return "%s(min=%s, max=%s, dtype=%s)" % (self.__class__.__name__,
+ self.min, self.max, self.dtype)
+
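+# Editor's sketch (not part of upstream NumPy): the min/max properties above
+# cache per-kind values, and unsigned kinds short-circuit min to 0. Assuming
+# standard NumPy semantics:
+#
+#     >>> np.iinfo(np.uint8).min, np.iinfo(np.uint8).max
+#     (0, 255)
+#     >>> np.iinfo(np.int8).min, np.iinfo(np.int8).max
+#     (-128, 127)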
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/getlimits.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/getlimits.pyc
new file mode 100644
index 0000000..0eb30ab
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/getlimits.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/__multiarray_api.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/__multiarray_api.h
new file mode 100644
index 0000000..e5b1cf2
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/__multiarray_api.h
@@ -0,0 +1,1554 @@
+
+#if defined(_MULTIARRAYMODULE) || defined(WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE)
+
+typedef struct {
+ PyObject_HEAD
+ npy_bool obval;
+} PyBoolScalarObject;
+
+extern NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type;
+extern NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type;
+extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];
+
+NPY_NO_EXPORT unsigned int PyArray_GetNDArrayCVersion \
+ (void);
+extern NPY_NO_EXPORT PyTypeObject PyBigArray_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyArray_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyArrayDescr_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyArrayFlags_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyArrayIter_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyArrayMultiIter_Type;
+
+extern NPY_NO_EXPORT int NPY_NUMUSERTYPES;
+
+extern NPY_NO_EXPORT PyTypeObject PyBoolArrType_Type;
+
+extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];
+
+extern NPY_NO_EXPORT PyTypeObject PyGenericArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyNumberArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyIntegerArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PySignedIntegerArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyUnsignedIntegerArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyInexactArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyFloatingArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyComplexFloatingArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyFlexibleArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyCharacterArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyByteArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyShortArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyIntArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyLongArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyLongLongArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyUByteArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyUShortArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyUIntArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyULongArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyULongLongArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyFloatArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyDoubleArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyLongDoubleArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyCFloatArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyCDoubleArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyCLongDoubleArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyObjectArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyStringArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyUnicodeArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyVoidArrType_Type;
+
+NPY_NO_EXPORT int PyArray_SetNumericOps \
+ (PyObject *);
+NPY_NO_EXPORT PyObject * PyArray_GetNumericOps \
+ (void);
+NPY_NO_EXPORT int PyArray_INCREF \
+ (PyArrayObject *);
+NPY_NO_EXPORT int PyArray_XDECREF \
+ (PyArrayObject *);
+NPY_NO_EXPORT void PyArray_SetStringFunction \
+ (PyObject *, int);
+NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromType \
+ (int);
+NPY_NO_EXPORT PyObject * PyArray_TypeObjectFromType \
+ (int);
+NPY_NO_EXPORT char * PyArray_Zero \
+ (PyArrayObject *);
+NPY_NO_EXPORT char * PyArray_One \
+ (PyArrayObject *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) NPY_GCC_NONNULL(2) PyObject * PyArray_CastToType \
+ (PyArrayObject *, PyArray_Descr *, int);
+NPY_NO_EXPORT int PyArray_CastTo \
+ (PyArrayObject *, PyArrayObject *);
+NPY_NO_EXPORT int PyArray_CastAnyTo \
+ (PyArrayObject *, PyArrayObject *);
+NPY_NO_EXPORT int PyArray_CanCastSafely \
+ (int, int);
+NPY_NO_EXPORT npy_bool PyArray_CanCastTo \
+ (PyArray_Descr *, PyArray_Descr *);
+NPY_NO_EXPORT int PyArray_ObjectType \
+ (PyObject *, int);
+NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromObject \
+ (PyObject *, PyArray_Descr *);
+NPY_NO_EXPORT PyArrayObject ** PyArray_ConvertToCommonType \
+ (PyObject *, int *);
+NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromScalar \
+ (PyObject *);
+NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromTypeObject \
+ (PyObject *);
+NPY_NO_EXPORT npy_intp PyArray_Size \
+ (PyObject *);
+NPY_NO_EXPORT PyObject * PyArray_Scalar \
+ (void *, PyArray_Descr *, PyObject *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromScalar \
+ (PyObject *, PyArray_Descr *);
+NPY_NO_EXPORT void PyArray_ScalarAsCtype \
+ (PyObject *, void *);
+NPY_NO_EXPORT int PyArray_CastScalarToCtype \
+ (PyObject *, void *, PyArray_Descr *);
+NPY_NO_EXPORT int PyArray_CastScalarDirect \
+ (PyObject *, PyArray_Descr *, void *, int);
+NPY_NO_EXPORT PyObject * PyArray_ScalarFromObject \
+ (PyObject *);
+NPY_NO_EXPORT PyArray_VectorUnaryFunc * PyArray_GetCastFunc \
+ (PyArray_Descr *, int);
+NPY_NO_EXPORT PyObject * PyArray_FromDims \
+ (int, int *, int);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_FromDimsAndDataAndDescr \
+ (int, int *, PyArray_Descr *, char *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromAny \
+ (PyObject *, PyArray_Descr *, int, int, int, PyObject *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(1) PyObject * PyArray_EnsureArray \
+ (PyObject *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(1) PyObject * PyArray_EnsureAnyArray \
+ (PyObject *);
+NPY_NO_EXPORT PyObject * PyArray_FromFile \
+ (FILE *, PyArray_Descr *, npy_intp, char *);
+NPY_NO_EXPORT PyObject * PyArray_FromString \
+ (char *, npy_intp, PyArray_Descr *, npy_intp, char *);
+NPY_NO_EXPORT PyObject * PyArray_FromBuffer \
+ (PyObject *, PyArray_Descr *, npy_intp, npy_intp);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromIter \
+ (PyObject *, PyArray_Descr *, npy_intp);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(1) PyObject * PyArray_Return \
+ (PyArrayObject *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) NPY_GCC_NONNULL(2) PyObject * PyArray_GetField \
+ (PyArrayObject *, PyArray_Descr *, int);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) NPY_GCC_NONNULL(2) int PyArray_SetField \
+ (PyArrayObject *, PyArray_Descr *, int, PyObject *);
+NPY_NO_EXPORT PyObject * PyArray_Byteswap \
+ (PyArrayObject *, npy_bool);
+NPY_NO_EXPORT PyObject * PyArray_Resize \
+ (PyArrayObject *, PyArray_Dims *, int, NPY_ORDER);
+NPY_NO_EXPORT int PyArray_MoveInto \
+ (PyArrayObject *, PyArrayObject *);
+NPY_NO_EXPORT int PyArray_CopyInto \
+ (PyArrayObject *, PyArrayObject *);
+NPY_NO_EXPORT int PyArray_CopyAnyInto \
+ (PyArrayObject *, PyArrayObject *);
+NPY_NO_EXPORT int PyArray_CopyObject \
+ (PyArrayObject *, PyObject *);
+NPY_NO_EXPORT NPY_GCC_NONNULL(1) PyObject * PyArray_NewCopy \
+ (PyArrayObject *, NPY_ORDER);
+NPY_NO_EXPORT PyObject * PyArray_ToList \
+ (PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_ToString \
+ (PyArrayObject *, NPY_ORDER);
+NPY_NO_EXPORT int PyArray_ToFile \
+ (PyArrayObject *, FILE *, char *, char *);
+NPY_NO_EXPORT int PyArray_Dump \
+ (PyObject *, PyObject *, int);
+NPY_NO_EXPORT PyObject * PyArray_Dumps \
+ (PyObject *, int);
+NPY_NO_EXPORT int PyArray_ValidType \
+ (int);
+NPY_NO_EXPORT void PyArray_UpdateFlags \
+ (PyArrayObject *, int);
+NPY_NO_EXPORT NPY_GCC_NONNULL(1) PyObject * PyArray_New \
+ (PyTypeObject *, int, npy_intp *, int, npy_intp *, void *, int, int, PyObject *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) NPY_GCC_NONNULL(1) NPY_GCC_NONNULL(2) PyObject * PyArray_NewFromDescr \
+ (PyTypeObject *, PyArray_Descr *, int, npy_intp *, npy_intp *, void *, int, PyObject *);
+NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNew \
+ (PyArray_Descr *);
+NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNewFromType \
+ (int);
+NPY_NO_EXPORT double PyArray_GetPriority \
+ (PyObject *, double);
+NPY_NO_EXPORT PyObject * PyArray_IterNew \
+ (PyObject *);
+NPY_NO_EXPORT PyObject * PyArray_MultiIterNew \
+ (int, ...);
+NPY_NO_EXPORT int PyArray_PyIntAsInt \
+ (PyObject *);
+NPY_NO_EXPORT npy_intp PyArray_PyIntAsIntp \
+ (PyObject *);
+NPY_NO_EXPORT int PyArray_Broadcast \
+ (PyArrayMultiIterObject *);
+NPY_NO_EXPORT void PyArray_FillObjectArray \
+ (PyArrayObject *, PyObject *);
+NPY_NO_EXPORT int PyArray_FillWithScalar \
+ (PyArrayObject *, PyObject *);
+NPY_NO_EXPORT npy_bool PyArray_CheckStrides \
+ (int, int, npy_intp, npy_intp, npy_intp *, npy_intp *);
+NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNewByteorder \
+ (PyArray_Descr *, char);
+NPY_NO_EXPORT PyObject * PyArray_IterAllButAxis \
+ (PyObject *, int *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_CheckFromAny \
+ (PyObject *, PyArray_Descr *, int, int, int, PyObject *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromArray \
+ (PyArrayObject *, PyArray_Descr *, int);
+NPY_NO_EXPORT PyObject * PyArray_FromInterface \
+ (PyObject *);
+NPY_NO_EXPORT PyObject * PyArray_FromStructInterface \
+ (PyObject *);
+NPY_NO_EXPORT PyObject * PyArray_FromArrayAttr \
+ (PyObject *, PyArray_Descr *, PyObject *);
+NPY_NO_EXPORT NPY_SCALARKIND PyArray_ScalarKind \
+ (int, PyArrayObject **);
+NPY_NO_EXPORT int PyArray_CanCoerceScalar \
+ (int, int, NPY_SCALARKIND);
+NPY_NO_EXPORT PyObject * PyArray_NewFlagsObject \
+ (PyObject *);
+NPY_NO_EXPORT npy_bool PyArray_CanCastScalar \
+ (PyTypeObject *, PyTypeObject *);
+NPY_NO_EXPORT int PyArray_CompareUCS4 \
+ (npy_ucs4 *, npy_ucs4 *, size_t);
+NPY_NO_EXPORT int PyArray_RemoveSmallest \
+ (PyArrayMultiIterObject *);
+NPY_NO_EXPORT int PyArray_ElementStrides \
+ (PyObject *);
+NPY_NO_EXPORT void PyArray_Item_INCREF \
+ (char *, PyArray_Descr *);
+NPY_NO_EXPORT void PyArray_Item_XDECREF \
+ (char *, PyArray_Descr *);
+NPY_NO_EXPORT PyObject * PyArray_FieldNames \
+ (PyObject *);
+NPY_NO_EXPORT PyObject * PyArray_Transpose \
+ (PyArrayObject *, PyArray_Dims *);
+NPY_NO_EXPORT PyObject * PyArray_TakeFrom \
+ (PyArrayObject *, PyObject *, int, PyArrayObject *, NPY_CLIPMODE);
+NPY_NO_EXPORT PyObject * PyArray_PutTo \
+ (PyArrayObject *, PyObject*, PyObject *, NPY_CLIPMODE);
+NPY_NO_EXPORT PyObject * PyArray_PutMask \
+ (PyArrayObject *, PyObject*, PyObject*);
+NPY_NO_EXPORT PyObject * PyArray_Repeat \
+ (PyArrayObject *, PyObject *, int);
+NPY_NO_EXPORT PyObject * PyArray_Choose \
+ (PyArrayObject *, PyObject *, PyArrayObject *, NPY_CLIPMODE);
+NPY_NO_EXPORT int PyArray_Sort \
+ (PyArrayObject *, int, NPY_SORTKIND);
+NPY_NO_EXPORT PyObject * PyArray_ArgSort \
+ (PyArrayObject *, int, NPY_SORTKIND);
+NPY_NO_EXPORT PyObject * PyArray_SearchSorted \
+ (PyArrayObject *, PyObject *, NPY_SEARCHSIDE, PyObject *);
+NPY_NO_EXPORT PyObject * PyArray_ArgMax \
+ (PyArrayObject *, int, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_ArgMin \
+ (PyArrayObject *, int, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_Reshape \
+ (PyArrayObject *, PyObject *);
+NPY_NO_EXPORT PyObject * PyArray_Newshape \
+ (PyArrayObject *, PyArray_Dims *, NPY_ORDER);
+NPY_NO_EXPORT PyObject * PyArray_Squeeze \
+ (PyArrayObject *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_View \
+ (PyArrayObject *, PyArray_Descr *, PyTypeObject *);
+NPY_NO_EXPORT PyObject * PyArray_SwapAxes \
+ (PyArrayObject *, int, int);
+NPY_NO_EXPORT PyObject * PyArray_Max \
+ (PyArrayObject *, int, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_Min \
+ (PyArrayObject *, int, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_Ptp \
+ (PyArrayObject *, int, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_Mean \
+ (PyArrayObject *, int, int, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_Trace \
+ (PyArrayObject *, int, int, int, int, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_Diagonal \
+ (PyArrayObject *, int, int, int);
+NPY_NO_EXPORT PyObject * PyArray_Clip \
+ (PyArrayObject *, PyObject *, PyObject *, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_Conjugate \
+ (PyArrayObject *, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_Nonzero \
+ (PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_Std \
+ (PyArrayObject *, int, int, PyArrayObject *, int);
+NPY_NO_EXPORT PyObject * PyArray_Sum \
+ (PyArrayObject *, int, int, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_CumSum \
+ (PyArrayObject *, int, int, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_Prod \
+ (PyArrayObject *, int, int, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_CumProd \
+ (PyArrayObject *, int, int, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_All \
+ (PyArrayObject *, int, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_Any \
+ (PyArrayObject *, int, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_Compress \
+ (PyArrayObject *, PyObject *, int, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_Flatten \
+ (PyArrayObject *, NPY_ORDER);
+NPY_NO_EXPORT PyObject * PyArray_Ravel \
+ (PyArrayObject *, NPY_ORDER);
+NPY_NO_EXPORT npy_intp PyArray_MultiplyList \
+ (npy_intp *, int);
+NPY_NO_EXPORT int PyArray_MultiplyIntList \
+ (int *, int);
+NPY_NO_EXPORT void * PyArray_GetPtr \
+ (PyArrayObject *, npy_intp*);
+NPY_NO_EXPORT int PyArray_CompareLists \
+ (npy_intp *, npy_intp *, int);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(5) int PyArray_AsCArray \
+ (PyObject **, void *, npy_intp *, int, PyArray_Descr*);
+NPY_NO_EXPORT int PyArray_As1D \
+ (PyObject **, char **, int *, int);
+NPY_NO_EXPORT int PyArray_As2D \
+ (PyObject **, char ***, int *, int *, int);
+NPY_NO_EXPORT int PyArray_Free \
+ (PyObject *, void *);
+NPY_NO_EXPORT int PyArray_Converter \
+ (PyObject *, PyObject **);
+NPY_NO_EXPORT int PyArray_IntpFromSequence \
+ (PyObject *, npy_intp *, int);
+NPY_NO_EXPORT PyObject * PyArray_Concatenate \
+ (PyObject *, int);
+NPY_NO_EXPORT PyObject * PyArray_InnerProduct \
+ (PyObject *, PyObject *);
+NPY_NO_EXPORT PyObject * PyArray_MatrixProduct \
+ (PyObject *, PyObject *);
+NPY_NO_EXPORT PyObject * PyArray_CopyAndTranspose \
+ (PyObject *);
+NPY_NO_EXPORT PyObject * PyArray_Correlate \
+ (PyObject *, PyObject *, int);
+NPY_NO_EXPORT int PyArray_TypestrConvert \
+ (int, int);
+NPY_NO_EXPORT int PyArray_DescrConverter \
+ (PyObject *, PyArray_Descr **);
+NPY_NO_EXPORT int PyArray_DescrConverter2 \
+ (PyObject *, PyArray_Descr **);
+NPY_NO_EXPORT int PyArray_IntpConverter \
+ (PyObject *, PyArray_Dims *);
+NPY_NO_EXPORT int PyArray_BufferConverter \
+ (PyObject *, PyArray_Chunk *);
+NPY_NO_EXPORT int PyArray_AxisConverter \
+ (PyObject *, int *);
+NPY_NO_EXPORT int PyArray_BoolConverter \
+ (PyObject *, npy_bool *);
+NPY_NO_EXPORT int PyArray_ByteorderConverter \
+ (PyObject *, char *);
+NPY_NO_EXPORT int PyArray_OrderConverter \
+ (PyObject *, NPY_ORDER *);
+NPY_NO_EXPORT unsigned char PyArray_EquivTypes \
+ (PyArray_Descr *, PyArray_Descr *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_Zeros \
+ (int, npy_intp *, PyArray_Descr *, int);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_Empty \
+ (int, npy_intp *, PyArray_Descr *, int);
+NPY_NO_EXPORT PyObject * PyArray_Where \
+ (PyObject *, PyObject *, PyObject *);
+NPY_NO_EXPORT PyObject * PyArray_Arange \
+ (double, double, double, int);
+NPY_NO_EXPORT PyObject * PyArray_ArangeObj \
+ (PyObject *, PyObject *, PyObject *, PyArray_Descr *);
+NPY_NO_EXPORT int PyArray_SortkindConverter \
+ (PyObject *, NPY_SORTKIND *);
+NPY_NO_EXPORT PyObject * PyArray_LexSort \
+ (PyObject *, int);
+NPY_NO_EXPORT PyObject * PyArray_Round \
+ (PyArrayObject *, int, PyArrayObject *);
+NPY_NO_EXPORT unsigned char PyArray_EquivTypenums \
+ (int, int);
+NPY_NO_EXPORT int PyArray_RegisterDataType \
+ (PyArray_Descr *);
+NPY_NO_EXPORT int PyArray_RegisterCastFunc \
+ (PyArray_Descr *, int, PyArray_VectorUnaryFunc *);
+NPY_NO_EXPORT int PyArray_RegisterCanCast \
+ (PyArray_Descr *, int, NPY_SCALARKIND);
+NPY_NO_EXPORT void PyArray_InitArrFuncs \
+ (PyArray_ArrFuncs *);
+NPY_NO_EXPORT PyObject * PyArray_IntTupleFromIntp \
+ (int, npy_intp *);
+NPY_NO_EXPORT int PyArray_TypeNumFromName \
+ (char *);
+NPY_NO_EXPORT int PyArray_ClipmodeConverter \
+ (PyObject *, NPY_CLIPMODE *);
+NPY_NO_EXPORT int PyArray_OutputConverter \
+ (PyObject *, PyArrayObject **);
+NPY_NO_EXPORT PyObject * PyArray_BroadcastToShape \
+ (PyObject *, npy_intp *, int);
+NPY_NO_EXPORT void _PyArray_SigintHandler \
+ (int);
+NPY_NO_EXPORT void* _PyArray_GetSigintBuf \
+ (void);
+NPY_NO_EXPORT int PyArray_DescrAlignConverter \
+ (PyObject *, PyArray_Descr **);
+NPY_NO_EXPORT int PyArray_DescrAlignConverter2 \
+ (PyObject *, PyArray_Descr **);
+NPY_NO_EXPORT int PyArray_SearchsideConverter \
+ (PyObject *, void *);
+NPY_NO_EXPORT PyObject * PyArray_CheckAxis \
+ (PyArrayObject *, int *, int);
+NPY_NO_EXPORT npy_intp PyArray_OverflowMultiplyList \
+ (npy_intp *, int);
+NPY_NO_EXPORT int PyArray_CompareString \
+ (char *, char *, size_t);
+NPY_NO_EXPORT PyObject * PyArray_MultiIterFromObjects \
+ (PyObject **, int, int, ...);
+NPY_NO_EXPORT int PyArray_GetEndianness \
+ (void);
+NPY_NO_EXPORT unsigned int PyArray_GetNDArrayCFeatureVersion \
+ (void);
+NPY_NO_EXPORT PyObject * PyArray_Correlate2 \
+ (PyObject *, PyObject *, int);
+NPY_NO_EXPORT PyObject* PyArray_NeighborhoodIterNew \
+ (PyArrayIterObject *, npy_intp *, int, PyArrayObject*);
+extern NPY_NO_EXPORT PyTypeObject PyTimeIntegerArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyDatetimeArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyTimedeltaArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyHalfArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject NpyIter_Type;
+
+NPY_NO_EXPORT void PyArray_SetDatetimeParseFunction \
+ (PyObject *);
+NPY_NO_EXPORT void PyArray_DatetimeToDatetimeStruct \
+ (npy_datetime, NPY_DATETIMEUNIT, npy_datetimestruct *);
+NPY_NO_EXPORT void PyArray_TimedeltaToTimedeltaStruct \
+ (npy_timedelta, NPY_DATETIMEUNIT, npy_timedeltastruct *);
+NPY_NO_EXPORT npy_datetime PyArray_DatetimeStructToDatetime \
+ (NPY_DATETIMEUNIT, npy_datetimestruct *);
+NPY_NO_EXPORT npy_datetime PyArray_TimedeltaStructToTimedelta \
+ (NPY_DATETIMEUNIT, npy_timedeltastruct *);
+NPY_NO_EXPORT NpyIter * NpyIter_New \
+ (PyArrayObject *, npy_uint32, NPY_ORDER, NPY_CASTING, PyArray_Descr*);
+NPY_NO_EXPORT NpyIter * NpyIter_MultiNew \
+ (int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **);
+NPY_NO_EXPORT NpyIter * NpyIter_AdvancedNew \
+ (int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **, int, int **, npy_intp *, npy_intp);
+NPY_NO_EXPORT NpyIter * NpyIter_Copy \
+ (NpyIter *);
+NPY_NO_EXPORT int NpyIter_Deallocate \
+ (NpyIter *);
+NPY_NO_EXPORT npy_bool NpyIter_HasDelayedBufAlloc \
+ (NpyIter *);
+NPY_NO_EXPORT npy_bool NpyIter_HasExternalLoop \
+ (NpyIter *);
+NPY_NO_EXPORT int NpyIter_EnableExternalLoop \
+ (NpyIter *);
+NPY_NO_EXPORT npy_intp * NpyIter_GetInnerStrideArray \
+ (NpyIter *);
+NPY_NO_EXPORT npy_intp * NpyIter_GetInnerLoopSizePtr \
+ (NpyIter *);
+NPY_NO_EXPORT int NpyIter_Reset \
+ (NpyIter *, char **);
+NPY_NO_EXPORT int NpyIter_ResetBasePointers \
+ (NpyIter *, char **, char **);
+NPY_NO_EXPORT int NpyIter_ResetToIterIndexRange \
+ (NpyIter *, npy_intp, npy_intp, char **);
+NPY_NO_EXPORT int NpyIter_GetNDim \
+ (NpyIter *);
+NPY_NO_EXPORT int NpyIter_GetNOp \
+ (NpyIter *);
+NPY_NO_EXPORT NpyIter_IterNextFunc * NpyIter_GetIterNext \
+ (NpyIter *, char **);
+NPY_NO_EXPORT npy_intp NpyIter_GetIterSize \
+ (NpyIter *);
+NPY_NO_EXPORT void NpyIter_GetIterIndexRange \
+ (NpyIter *, npy_intp *, npy_intp *);
+NPY_NO_EXPORT npy_intp NpyIter_GetIterIndex \
+ (NpyIter *);
+NPY_NO_EXPORT int NpyIter_GotoIterIndex \
+ (NpyIter *, npy_intp);
+NPY_NO_EXPORT npy_bool NpyIter_HasMultiIndex \
+ (NpyIter *);
+NPY_NO_EXPORT int NpyIter_GetShape \
+ (NpyIter *, npy_intp *);
+NPY_NO_EXPORT NpyIter_GetMultiIndexFunc * NpyIter_GetGetMultiIndex \
+ (NpyIter *, char **);
+NPY_NO_EXPORT int NpyIter_GotoMultiIndex \
+ (NpyIter *, npy_intp *);
+NPY_NO_EXPORT int NpyIter_RemoveMultiIndex \
+ (NpyIter *);
+NPY_NO_EXPORT npy_bool NpyIter_HasIndex \
+ (NpyIter *);
+NPY_NO_EXPORT npy_bool NpyIter_IsBuffered \
+ (NpyIter *);
+NPY_NO_EXPORT npy_bool NpyIter_IsGrowInner \
+ (NpyIter *);
+NPY_NO_EXPORT npy_intp NpyIter_GetBufferSize \
+ (NpyIter *);
+NPY_NO_EXPORT npy_intp * NpyIter_GetIndexPtr \
+ (NpyIter *);
+NPY_NO_EXPORT int NpyIter_GotoIndex \
+ (NpyIter *, npy_intp);
+NPY_NO_EXPORT char ** NpyIter_GetDataPtrArray \
+ (NpyIter *);
+NPY_NO_EXPORT PyArray_Descr ** NpyIter_GetDescrArray \
+ (NpyIter *);
+NPY_NO_EXPORT PyArrayObject ** NpyIter_GetOperandArray \
+ (NpyIter *);
+NPY_NO_EXPORT PyArrayObject * NpyIter_GetIterView \
+ (NpyIter *, npy_intp);
+NPY_NO_EXPORT void NpyIter_GetReadFlags \
+ (NpyIter *, char *);
+NPY_NO_EXPORT void NpyIter_GetWriteFlags \
+ (NpyIter *, char *);
+NPY_NO_EXPORT void NpyIter_DebugPrint \
+ (NpyIter *);
+NPY_NO_EXPORT npy_bool NpyIter_IterationNeedsAPI \
+ (NpyIter *);
+NPY_NO_EXPORT void NpyIter_GetInnerFixedStrideArray \
+ (NpyIter *, npy_intp *);
+NPY_NO_EXPORT int NpyIter_RemoveAxis \
+ (NpyIter *, int);
+NPY_NO_EXPORT npy_intp * NpyIter_GetAxisStrideArray \
+ (NpyIter *, int);
+NPY_NO_EXPORT npy_bool NpyIter_RequiresBuffering \
+ (NpyIter *);
+NPY_NO_EXPORT char ** NpyIter_GetInitialDataPtrArray \
+ (NpyIter *);
+NPY_NO_EXPORT int NpyIter_CreateCompatibleStrides \
+ (NpyIter *, npy_intp, npy_intp *);
+NPY_NO_EXPORT int PyArray_CastingConverter \
+ (PyObject *, NPY_CASTING *);
+NPY_NO_EXPORT npy_intp PyArray_CountNonzero \
+ (PyArrayObject *);
+NPY_NO_EXPORT PyArray_Descr * PyArray_PromoteTypes \
+ (PyArray_Descr *, PyArray_Descr *);
+NPY_NO_EXPORT PyArray_Descr * PyArray_MinScalarType \
+ (PyArrayObject *);
+NPY_NO_EXPORT PyArray_Descr * PyArray_ResultType \
+ (npy_intp, PyArrayObject **, npy_intp, PyArray_Descr **);
+NPY_NO_EXPORT npy_bool PyArray_CanCastArrayTo \
+ (PyArrayObject *, PyArray_Descr *, NPY_CASTING);
+NPY_NO_EXPORT npy_bool PyArray_CanCastTypeTo \
+ (PyArray_Descr *, PyArray_Descr *, NPY_CASTING);
+NPY_NO_EXPORT PyArrayObject * PyArray_EinsteinSum \
+ (char *, npy_intp, PyArrayObject **, PyArray_Descr *, NPY_ORDER, NPY_CASTING, PyArrayObject *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) NPY_GCC_NONNULL(1) PyObject * PyArray_NewLikeArray \
+ (PyArrayObject *, NPY_ORDER, PyArray_Descr *, int);
+NPY_NO_EXPORT int PyArray_GetArrayParamsFromObject \
+ (PyObject *, PyArray_Descr *, npy_bool, PyArray_Descr **, int *, npy_intp *, PyArrayObject **, PyObject *);
+NPY_NO_EXPORT int PyArray_ConvertClipmodeSequence \
+ (PyObject *, NPY_CLIPMODE *, int);
+NPY_NO_EXPORT PyObject * PyArray_MatrixProduct2 \
+ (PyObject *, PyObject *, PyArrayObject*);
+NPY_NO_EXPORT npy_bool NpyIter_IsFirstVisit \
+ (NpyIter *, int);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) int PyArray_SetBaseObject \
+ (PyArrayObject *, PyObject *);
+NPY_NO_EXPORT void PyArray_CreateSortedStridePerm \
+ (int, npy_intp *, npy_stride_sort_item *);
+NPY_NO_EXPORT void PyArray_RemoveAxesInPlace \
+ (PyArrayObject *, npy_bool *);
+NPY_NO_EXPORT void PyArray_DebugPrint \
+ (PyArrayObject *);
+NPY_NO_EXPORT int PyArray_FailUnlessWriteable \
+ (PyArrayObject *, const char *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) int PyArray_SetUpdateIfCopyBase \
+ (PyArrayObject *, PyArrayObject *);
+NPY_NO_EXPORT void * PyDataMem_NEW \
+ (size_t);
+NPY_NO_EXPORT void PyDataMem_FREE \
+ (void *);
+NPY_NO_EXPORT void * PyDataMem_RENEW \
+ (void *, size_t);
+NPY_NO_EXPORT PyDataMem_EventHookFunc * PyDataMem_SetEventHook \
+ (PyDataMem_EventHookFunc *, void *, void **);
+extern NPY_NO_EXPORT NPY_CASTING NPY_DEFAULT_ASSIGN_CASTING;
+
+NPY_NO_EXPORT void PyArray_MapIterSwapAxes \
+ (PyArrayMapIterObject *, PyArrayObject **, int);
+NPY_NO_EXPORT PyObject * PyArray_MapIterArray \
+ (PyArrayObject *, PyObject *);
+NPY_NO_EXPORT void PyArray_MapIterNext \
+ (PyArrayMapIterObject *);
+NPY_NO_EXPORT int PyArray_Partition \
+ (PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND);
+NPY_NO_EXPORT PyObject * PyArray_ArgPartition \
+ (PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND);
+NPY_NO_EXPORT int PyArray_SelectkindConverter \
+ (PyObject *, NPY_SELECTKIND *);
+NPY_NO_EXPORT void * PyDataMem_NEW_ZEROED \
+ (size_t, size_t);
+NPY_NO_EXPORT NPY_GCC_NONNULL(1) int PyArray_CheckAnyScalarExact \
+ (PyObject *);
+NPY_NO_EXPORT PyObject * PyArray_MapIterArrayCopyIfOverlap \
+ (PyArrayObject *, PyObject *, int, PyArrayObject *);
+NPY_NO_EXPORT int PyArray_ResolveWritebackIfCopy \
+ (PyArrayObject *);
+NPY_NO_EXPORT int PyArray_SetWritebackIfCopyBase \
+ (PyArrayObject *, PyArrayObject *);
+
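+/*
+ * End of the in-tree branch: the direct declarations above are only
+ * visible while compiling NumPy's own multiarray module.  The #else
+ * branch below is what every other extension sees -- each API name
+ * becomes a macro that dereferences a slot in the PyArray_API
+ * pointer table.
+ */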
+#else
+
+#if defined(PY_ARRAY_UNIQUE_SYMBOL)
+#define PyArray_API PY_ARRAY_UNIQUE_SYMBOL
+#endif
+
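+/*
+ * Linkage of the table: by default PyArray_API is file-local (static),
+ * so each translation unit must run import_array() itself.  Defining
+ * PY_ARRAY_UNIQUE_SYMBOL (with NO_IMPORT_ARRAY in all but one source
+ * file) lets several translation units share a single table.
+ */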
+#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY)
+extern void **PyArray_API;
+#else
+#if defined(PY_ARRAY_UNIQUE_SYMBOL)
+void **PyArray_API;
+#else
+static void **PyArray_API=NULL;
+#endif
+#endif
+
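+/*
+ * Slot 0 is the ABI version probe used by _import_array() further
+ * below; the remaining slots follow the fixed order emitted by NumPy's
+ * code generators, which is why the indices stay stable for a given
+ * API version.
+ */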
+#define PyArray_GetNDArrayCVersion \
+ (*(unsigned int (*)(void)) \
+ PyArray_API[0])
+#define PyBigArray_Type (*(PyTypeObject *)PyArray_API[1])
+#define PyArray_Type (*(PyTypeObject *)PyArray_API[2])
+#define PyArrayDescr_Type (*(PyTypeObject *)PyArray_API[3])
+#define PyArrayFlags_Type (*(PyTypeObject *)PyArray_API[4])
+#define PyArrayIter_Type (*(PyTypeObject *)PyArray_API[5])
+#define PyArrayMultiIter_Type (*(PyTypeObject *)PyArray_API[6])
+#define NPY_NUMUSERTYPES (*(int *)PyArray_API[7])
+#define PyBoolArrType_Type (*(PyTypeObject *)PyArray_API[8])
+#define _PyArrayScalar_BoolValues ((PyBoolScalarObject *)PyArray_API[9])
+#define PyGenericArrType_Type (*(PyTypeObject *)PyArray_API[10])
+#define PyNumberArrType_Type (*(PyTypeObject *)PyArray_API[11])
+#define PyIntegerArrType_Type (*(PyTypeObject *)PyArray_API[12])
+#define PySignedIntegerArrType_Type (*(PyTypeObject *)PyArray_API[13])
+#define PyUnsignedIntegerArrType_Type (*(PyTypeObject *)PyArray_API[14])
+#define PyInexactArrType_Type (*(PyTypeObject *)PyArray_API[15])
+#define PyFloatingArrType_Type (*(PyTypeObject *)PyArray_API[16])
+#define PyComplexFloatingArrType_Type (*(PyTypeObject *)PyArray_API[17])
+#define PyFlexibleArrType_Type (*(PyTypeObject *)PyArray_API[18])
+#define PyCharacterArrType_Type (*(PyTypeObject *)PyArray_API[19])
+#define PyByteArrType_Type (*(PyTypeObject *)PyArray_API[20])
+#define PyShortArrType_Type (*(PyTypeObject *)PyArray_API[21])
+#define PyIntArrType_Type (*(PyTypeObject *)PyArray_API[22])
+#define PyLongArrType_Type (*(PyTypeObject *)PyArray_API[23])
+#define PyLongLongArrType_Type (*(PyTypeObject *)PyArray_API[24])
+#define PyUByteArrType_Type (*(PyTypeObject *)PyArray_API[25])
+#define PyUShortArrType_Type (*(PyTypeObject *)PyArray_API[26])
+#define PyUIntArrType_Type (*(PyTypeObject *)PyArray_API[27])
+#define PyULongArrType_Type (*(PyTypeObject *)PyArray_API[28])
+#define PyULongLongArrType_Type (*(PyTypeObject *)PyArray_API[29])
+#define PyFloatArrType_Type (*(PyTypeObject *)PyArray_API[30])
+#define PyDoubleArrType_Type (*(PyTypeObject *)PyArray_API[31])
+#define PyLongDoubleArrType_Type (*(PyTypeObject *)PyArray_API[32])
+#define PyCFloatArrType_Type (*(PyTypeObject *)PyArray_API[33])
+#define PyCDoubleArrType_Type (*(PyTypeObject *)PyArray_API[34])
+#define PyCLongDoubleArrType_Type (*(PyTypeObject *)PyArray_API[35])
+#define PyObjectArrType_Type (*(PyTypeObject *)PyArray_API[36])
+#define PyStringArrType_Type (*(PyTypeObject *)PyArray_API[37])
+#define PyUnicodeArrType_Type (*(PyTypeObject *)PyArray_API[38])
+#define PyVoidArrType_Type (*(PyTypeObject *)PyArray_API[39])
+#define PyArray_SetNumericOps \
+ (*(int (*)(PyObject *)) \
+ PyArray_API[40])
+#define PyArray_GetNumericOps \
+ (*(PyObject * (*)(void)) \
+ PyArray_API[41])
+#define PyArray_INCREF \
+ (*(int (*)(PyArrayObject *)) \
+ PyArray_API[42])
+#define PyArray_XDECREF \
+ (*(int (*)(PyArrayObject *)) \
+ PyArray_API[43])
+#define PyArray_SetStringFunction \
+ (*(void (*)(PyObject *, int)) \
+ PyArray_API[44])
+#define PyArray_DescrFromType \
+ (*(PyArray_Descr * (*)(int)) \
+ PyArray_API[45])
+#define PyArray_TypeObjectFromType \
+ (*(PyObject * (*)(int)) \
+ PyArray_API[46])
+#define PyArray_Zero \
+ (*(char * (*)(PyArrayObject *)) \
+ PyArray_API[47])
+#define PyArray_One \
+ (*(char * (*)(PyArrayObject *)) \
+ PyArray_API[48])
+#define PyArray_CastToType \
+ (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, int)) \
+ PyArray_API[49])
+#define PyArray_CastTo \
+ (*(int (*)(PyArrayObject *, PyArrayObject *)) \
+ PyArray_API[50])
+#define PyArray_CastAnyTo \
+ (*(int (*)(PyArrayObject *, PyArrayObject *)) \
+ PyArray_API[51])
+#define PyArray_CanCastSafely \
+ (*(int (*)(int, int)) \
+ PyArray_API[52])
+#define PyArray_CanCastTo \
+ (*(npy_bool (*)(PyArray_Descr *, PyArray_Descr *)) \
+ PyArray_API[53])
+#define PyArray_ObjectType \
+ (*(int (*)(PyObject *, int)) \
+ PyArray_API[54])
+#define PyArray_DescrFromObject \
+ (*(PyArray_Descr * (*)(PyObject *, PyArray_Descr *)) \
+ PyArray_API[55])
+#define PyArray_ConvertToCommonType \
+ (*(PyArrayObject ** (*)(PyObject *, int *)) \
+ PyArray_API[56])
+#define PyArray_DescrFromScalar \
+ (*(PyArray_Descr * (*)(PyObject *)) \
+ PyArray_API[57])
+#define PyArray_DescrFromTypeObject \
+ (*(PyArray_Descr * (*)(PyObject *)) \
+ PyArray_API[58])
+#define PyArray_Size \
+ (*(npy_intp (*)(PyObject *)) \
+ PyArray_API[59])
+#define PyArray_Scalar \
+ (*(PyObject * (*)(void *, PyArray_Descr *, PyObject *)) \
+ PyArray_API[60])
+#define PyArray_FromScalar \
+ (*(PyObject * (*)(PyObject *, PyArray_Descr *)) \
+ PyArray_API[61])
+#define PyArray_ScalarAsCtype \
+ (*(void (*)(PyObject *, void *)) \
+ PyArray_API[62])
+#define PyArray_CastScalarToCtype \
+ (*(int (*)(PyObject *, void *, PyArray_Descr *)) \
+ PyArray_API[63])
+#define PyArray_CastScalarDirect \
+ (*(int (*)(PyObject *, PyArray_Descr *, void *, int)) \
+ PyArray_API[64])
+#define PyArray_ScalarFromObject \
+ (*(PyObject * (*)(PyObject *)) \
+ PyArray_API[65])
+#define PyArray_GetCastFunc \
+ (*(PyArray_VectorUnaryFunc * (*)(PyArray_Descr *, int)) \
+ PyArray_API[66])
+#define PyArray_FromDims \
+ (*(PyObject * (*)(int, int *, int)) \
+ PyArray_API[67])
+#define PyArray_FromDimsAndDataAndDescr \
+ (*(PyObject * (*)(int, int *, PyArray_Descr *, char *)) \
+ PyArray_API[68])
+#define PyArray_FromAny \
+ (*(PyObject * (*)(PyObject *, PyArray_Descr *, int, int, int, PyObject *)) \
+ PyArray_API[69])
+#define PyArray_EnsureArray \
+ (*(PyObject * (*)(PyObject *)) \
+ PyArray_API[70])
+#define PyArray_EnsureAnyArray \
+ (*(PyObject * (*)(PyObject *)) \
+ PyArray_API[71])
+#define PyArray_FromFile \
+ (*(PyObject * (*)(FILE *, PyArray_Descr *, npy_intp, char *)) \
+ PyArray_API[72])
+#define PyArray_FromString \
+ (*(PyObject * (*)(char *, npy_intp, PyArray_Descr *, npy_intp, char *)) \
+ PyArray_API[73])
+#define PyArray_FromBuffer \
+ (*(PyObject * (*)(PyObject *, PyArray_Descr *, npy_intp, npy_intp)) \
+ PyArray_API[74])
+#define PyArray_FromIter \
+ (*(PyObject * (*)(PyObject *, PyArray_Descr *, npy_intp)) \
+ PyArray_API[75])
+#define PyArray_Return \
+ (*(PyObject * (*)(PyArrayObject *)) \
+ PyArray_API[76])
+#define PyArray_GetField \
+ (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, int)) \
+ PyArray_API[77])
+#define PyArray_SetField \
+ (*(int (*)(PyArrayObject *, PyArray_Descr *, int, PyObject *)) \
+ PyArray_API[78])
+#define PyArray_Byteswap \
+ (*(PyObject * (*)(PyArrayObject *, npy_bool)) \
+ PyArray_API[79])
+#define PyArray_Resize \
+ (*(PyObject * (*)(PyArrayObject *, PyArray_Dims *, int, NPY_ORDER)) \
+ PyArray_API[80])
+#define PyArray_MoveInto \
+ (*(int (*)(PyArrayObject *, PyArrayObject *)) \
+ PyArray_API[81])
+#define PyArray_CopyInto \
+ (*(int (*)(PyArrayObject *, PyArrayObject *)) \
+ PyArray_API[82])
+#define PyArray_CopyAnyInto \
+ (*(int (*)(PyArrayObject *, PyArrayObject *)) \
+ PyArray_API[83])
+#define PyArray_CopyObject \
+ (*(int (*)(PyArrayObject *, PyObject *)) \
+ PyArray_API[84])
+#define PyArray_NewCopy \
+ (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \
+ PyArray_API[85])
+#define PyArray_ToList \
+ (*(PyObject * (*)(PyArrayObject *)) \
+ PyArray_API[86])
+#define PyArray_ToString \
+ (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \
+ PyArray_API[87])
+#define PyArray_ToFile \
+ (*(int (*)(PyArrayObject *, FILE *, char *, char *)) \
+ PyArray_API[88])
+#define PyArray_Dump \
+ (*(int (*)(PyObject *, PyObject *, int)) \
+ PyArray_API[89])
+#define PyArray_Dumps \
+ (*(PyObject * (*)(PyObject *, int)) \
+ PyArray_API[90])
+#define PyArray_ValidType \
+ (*(int (*)(int)) \
+ PyArray_API[91])
+#define PyArray_UpdateFlags \
+ (*(void (*)(PyArrayObject *, int)) \
+ PyArray_API[92])
+#define PyArray_New \
+ (*(PyObject * (*)(PyTypeObject *, int, npy_intp *, int, npy_intp *, void *, int, int, PyObject *)) \
+ PyArray_API[93])
+#define PyArray_NewFromDescr \
+ (*(PyObject * (*)(PyTypeObject *, PyArray_Descr *, int, npy_intp *, npy_intp *, void *, int, PyObject *)) \
+ PyArray_API[94])
+#define PyArray_DescrNew \
+ (*(PyArray_Descr * (*)(PyArray_Descr *)) \
+ PyArray_API[95])
+#define PyArray_DescrNewFromType \
+ (*(PyArray_Descr * (*)(int)) \
+ PyArray_API[96])
+#define PyArray_GetPriority \
+ (*(double (*)(PyObject *, double)) \
+ PyArray_API[97])
+#define PyArray_IterNew \
+ (*(PyObject * (*)(PyObject *)) \
+ PyArray_API[98])
+#define PyArray_MultiIterNew \
+ (*(PyObject * (*)(int, ...)) \
+ PyArray_API[99])
+#define PyArray_PyIntAsInt \
+ (*(int (*)(PyObject *)) \
+ PyArray_API[100])
+#define PyArray_PyIntAsIntp \
+ (*(npy_intp (*)(PyObject *)) \
+ PyArray_API[101])
+#define PyArray_Broadcast \
+ (*(int (*)(PyArrayMultiIterObject *)) \
+ PyArray_API[102])
+#define PyArray_FillObjectArray \
+ (*(void (*)(PyArrayObject *, PyObject *)) \
+ PyArray_API[103])
+#define PyArray_FillWithScalar \
+ (*(int (*)(PyArrayObject *, PyObject *)) \
+ PyArray_API[104])
+#define PyArray_CheckStrides \
+ (*(npy_bool (*)(int, int, npy_intp, npy_intp, npy_intp *, npy_intp *)) \
+ PyArray_API[105])
+#define PyArray_DescrNewByteorder \
+ (*(PyArray_Descr * (*)(PyArray_Descr *, char)) \
+ PyArray_API[106])
+#define PyArray_IterAllButAxis \
+ (*(PyObject * (*)(PyObject *, int *)) \
+ PyArray_API[107])
+#define PyArray_CheckFromAny \
+ (*(PyObject * (*)(PyObject *, PyArray_Descr *, int, int, int, PyObject *)) \
+ PyArray_API[108])
+#define PyArray_FromArray \
+ (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, int)) \
+ PyArray_API[109])
+#define PyArray_FromInterface \
+ (*(PyObject * (*)(PyObject *)) \
+ PyArray_API[110])
+#define PyArray_FromStructInterface \
+ (*(PyObject * (*)(PyObject *)) \
+ PyArray_API[111])
+#define PyArray_FromArrayAttr \
+ (*(PyObject * (*)(PyObject *, PyArray_Descr *, PyObject *)) \
+ PyArray_API[112])
+#define PyArray_ScalarKind \
+ (*(NPY_SCALARKIND (*)(int, PyArrayObject **)) \
+ PyArray_API[113])
+#define PyArray_CanCoerceScalar \
+ (*(int (*)(int, int, NPY_SCALARKIND)) \
+ PyArray_API[114])
+#define PyArray_NewFlagsObject \
+ (*(PyObject * (*)(PyObject *)) \
+ PyArray_API[115])
+#define PyArray_CanCastScalar \
+ (*(npy_bool (*)(PyTypeObject *, PyTypeObject *)) \
+ PyArray_API[116])
+#define PyArray_CompareUCS4 \
+ (*(int (*)(npy_ucs4 *, npy_ucs4 *, size_t)) \
+ PyArray_API[117])
+#define PyArray_RemoveSmallest \
+ (*(int (*)(PyArrayMultiIterObject *)) \
+ PyArray_API[118])
+#define PyArray_ElementStrides \
+ (*(int (*)(PyObject *)) \
+ PyArray_API[119])
+#define PyArray_Item_INCREF \
+ (*(void (*)(char *, PyArray_Descr *)) \
+ PyArray_API[120])
+#define PyArray_Item_XDECREF \
+ (*(void (*)(char *, PyArray_Descr *)) \
+ PyArray_API[121])
+#define PyArray_FieldNames \
+ (*(PyObject * (*)(PyObject *)) \
+ PyArray_API[122])
+#define PyArray_Transpose \
+ (*(PyObject * (*)(PyArrayObject *, PyArray_Dims *)) \
+ PyArray_API[123])
+#define PyArray_TakeFrom \
+ (*(PyObject * (*)(PyArrayObject *, PyObject *, int, PyArrayObject *, NPY_CLIPMODE)) \
+ PyArray_API[124])
+#define PyArray_PutTo \
+ (*(PyObject * (*)(PyArrayObject *, PyObject*, PyObject *, NPY_CLIPMODE)) \
+ PyArray_API[125])
+#define PyArray_PutMask \
+ (*(PyObject * (*)(PyArrayObject *, PyObject*, PyObject*)) \
+ PyArray_API[126])
+#define PyArray_Repeat \
+ (*(PyObject * (*)(PyArrayObject *, PyObject *, int)) \
+ PyArray_API[127])
+#define PyArray_Choose \
+ (*(PyObject * (*)(PyArrayObject *, PyObject *, PyArrayObject *, NPY_CLIPMODE)) \
+ PyArray_API[128])
+#define PyArray_Sort \
+ (*(int (*)(PyArrayObject *, int, NPY_SORTKIND)) \
+ PyArray_API[129])
+#define PyArray_ArgSort \
+ (*(PyObject * (*)(PyArrayObject *, int, NPY_SORTKIND)) \
+ PyArray_API[130])
+#define PyArray_SearchSorted \
+ (*(PyObject * (*)(PyArrayObject *, PyObject *, NPY_SEARCHSIDE, PyObject *)) \
+ PyArray_API[131])
+#define PyArray_ArgMax \
+ (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \
+ PyArray_API[132])
+#define PyArray_ArgMin \
+ (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \
+ PyArray_API[133])
+#define PyArray_Reshape \
+ (*(PyObject * (*)(PyArrayObject *, PyObject *)) \
+ PyArray_API[134])
+#define PyArray_Newshape \
+ (*(PyObject * (*)(PyArrayObject *, PyArray_Dims *, NPY_ORDER)) \
+ PyArray_API[135])
+#define PyArray_Squeeze \
+ (*(PyObject * (*)(PyArrayObject *)) \
+ PyArray_API[136])
+#define PyArray_View \
+ (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, PyTypeObject *)) \
+ PyArray_API[137])
+#define PyArray_SwapAxes \
+ (*(PyObject * (*)(PyArrayObject *, int, int)) \
+ PyArray_API[138])
+#define PyArray_Max \
+ (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \
+ PyArray_API[139])
+#define PyArray_Min \
+ (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \
+ PyArray_API[140])
+#define PyArray_Ptp \
+ (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \
+ PyArray_API[141])
+#define PyArray_Mean \
+ (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \
+ PyArray_API[142])
+#define PyArray_Trace \
+ (*(PyObject * (*)(PyArrayObject *, int, int, int, int, PyArrayObject *)) \
+ PyArray_API[143])
+#define PyArray_Diagonal \
+ (*(PyObject * (*)(PyArrayObject *, int, int, int)) \
+ PyArray_API[144])
+#define PyArray_Clip \
+ (*(PyObject * (*)(PyArrayObject *, PyObject *, PyObject *, PyArrayObject *)) \
+ PyArray_API[145])
+#define PyArray_Conjugate \
+ (*(PyObject * (*)(PyArrayObject *, PyArrayObject *)) \
+ PyArray_API[146])
+#define PyArray_Nonzero \
+ (*(PyObject * (*)(PyArrayObject *)) \
+ PyArray_API[147])
+#define PyArray_Std \
+ (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *, int)) \
+ PyArray_API[148])
+#define PyArray_Sum \
+ (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \
+ PyArray_API[149])
+#define PyArray_CumSum \
+ (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \
+ PyArray_API[150])
+#define PyArray_Prod \
+ (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \
+ PyArray_API[151])
+#define PyArray_CumProd \
+ (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \
+ PyArray_API[152])
+#define PyArray_All \
+ (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \
+ PyArray_API[153])
+#define PyArray_Any \
+ (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \
+ PyArray_API[154])
+#define PyArray_Compress \
+ (*(PyObject * (*)(PyArrayObject *, PyObject *, int, PyArrayObject *)) \
+ PyArray_API[155])
+#define PyArray_Flatten \
+ (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \
+ PyArray_API[156])
+#define PyArray_Ravel \
+ (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \
+ PyArray_API[157])
+#define PyArray_MultiplyList \
+ (*(npy_intp (*)(npy_intp *, int)) \
+ PyArray_API[158])
+#define PyArray_MultiplyIntList \
+ (*(int (*)(int *, int)) \
+ PyArray_API[159])
+#define PyArray_GetPtr \
+ (*(void * (*)(PyArrayObject *, npy_intp*)) \
+ PyArray_API[160])
+#define PyArray_CompareLists \
+ (*(int (*)(npy_intp *, npy_intp *, int)) \
+ PyArray_API[161])
+#define PyArray_AsCArray \
+ (*(int (*)(PyObject **, void *, npy_intp *, int, PyArray_Descr*)) \
+ PyArray_API[162])
+#define PyArray_As1D \
+ (*(int (*)(PyObject **, char **, int *, int)) \
+ PyArray_API[163])
+#define PyArray_As2D \
+ (*(int (*)(PyObject **, char ***, int *, int *, int)) \
+ PyArray_API[164])
+#define PyArray_Free \
+ (*(int (*)(PyObject *, void *)) \
+ PyArray_API[165])
+#define PyArray_Converter \
+ (*(int (*)(PyObject *, PyObject **)) \
+ PyArray_API[166])
+#define PyArray_IntpFromSequence \
+ (*(int (*)(PyObject *, npy_intp *, int)) \
+ PyArray_API[167])
+#define PyArray_Concatenate \
+ (*(PyObject * (*)(PyObject *, int)) \
+ PyArray_API[168])
+#define PyArray_InnerProduct \
+ (*(PyObject * (*)(PyObject *, PyObject *)) \
+ PyArray_API[169])
+#define PyArray_MatrixProduct \
+ (*(PyObject * (*)(PyObject *, PyObject *)) \
+ PyArray_API[170])
+#define PyArray_CopyAndTranspose \
+ (*(PyObject * (*)(PyObject *)) \
+ PyArray_API[171])
+#define PyArray_Correlate \
+ (*(PyObject * (*)(PyObject *, PyObject *, int)) \
+ PyArray_API[172])
+#define PyArray_TypestrConvert \
+ (*(int (*)(int, int)) \
+ PyArray_API[173])
+#define PyArray_DescrConverter \
+ (*(int (*)(PyObject *, PyArray_Descr **)) \
+ PyArray_API[174])
+#define PyArray_DescrConverter2 \
+ (*(int (*)(PyObject *, PyArray_Descr **)) \
+ PyArray_API[175])
+#define PyArray_IntpConverter \
+ (*(int (*)(PyObject *, PyArray_Dims *)) \
+ PyArray_API[176])
+#define PyArray_BufferConverter \
+ (*(int (*)(PyObject *, PyArray_Chunk *)) \
+ PyArray_API[177])
+#define PyArray_AxisConverter \
+ (*(int (*)(PyObject *, int *)) \
+ PyArray_API[178])
+#define PyArray_BoolConverter \
+ (*(int (*)(PyObject *, npy_bool *)) \
+ PyArray_API[179])
+#define PyArray_ByteorderConverter \
+ (*(int (*)(PyObject *, char *)) \
+ PyArray_API[180])
+#define PyArray_OrderConverter \
+ (*(int (*)(PyObject *, NPY_ORDER *)) \
+ PyArray_API[181])
+#define PyArray_EquivTypes \
+ (*(unsigned char (*)(PyArray_Descr *, PyArray_Descr *)) \
+ PyArray_API[182])
+#define PyArray_Zeros \
+ (*(PyObject * (*)(int, npy_intp *, PyArray_Descr *, int)) \
+ PyArray_API[183])
+#define PyArray_Empty \
+ (*(PyObject * (*)(int, npy_intp *, PyArray_Descr *, int)) \
+ PyArray_API[184])
+#define PyArray_Where \
+ (*(PyObject * (*)(PyObject *, PyObject *, PyObject *)) \
+ PyArray_API[185])
+#define PyArray_Arange \
+ (*(PyObject * (*)(double, double, double, int)) \
+ PyArray_API[186])
+#define PyArray_ArangeObj \
+ (*(PyObject * (*)(PyObject *, PyObject *, PyObject *, PyArray_Descr *)) \
+ PyArray_API[187])
+#define PyArray_SortkindConverter \
+ (*(int (*)(PyObject *, NPY_SORTKIND *)) \
+ PyArray_API[188])
+#define PyArray_LexSort \
+ (*(PyObject * (*)(PyObject *, int)) \
+ PyArray_API[189])
+#define PyArray_Round \
+ (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \
+ PyArray_API[190])
+#define PyArray_EquivTypenums \
+ (*(unsigned char (*)(int, int)) \
+ PyArray_API[191])
+#define PyArray_RegisterDataType \
+ (*(int (*)(PyArray_Descr *)) \
+ PyArray_API[192])
+#define PyArray_RegisterCastFunc \
+ (*(int (*)(PyArray_Descr *, int, PyArray_VectorUnaryFunc *)) \
+ PyArray_API[193])
+#define PyArray_RegisterCanCast \
+ (*(int (*)(PyArray_Descr *, int, NPY_SCALARKIND)) \
+ PyArray_API[194])
+#define PyArray_InitArrFuncs \
+ (*(void (*)(PyArray_ArrFuncs *)) \
+ PyArray_API[195])
+#define PyArray_IntTupleFromIntp \
+ (*(PyObject * (*)(int, npy_intp *)) \
+ PyArray_API[196])
+#define PyArray_TypeNumFromName \
+ (*(int (*)(char *)) \
+ PyArray_API[197])
+#define PyArray_ClipmodeConverter \
+ (*(int (*)(PyObject *, NPY_CLIPMODE *)) \
+ PyArray_API[198])
+#define PyArray_OutputConverter \
+ (*(int (*)(PyObject *, PyArrayObject **)) \
+ PyArray_API[199])
+#define PyArray_BroadcastToShape \
+ (*(PyObject * (*)(PyObject *, npy_intp *, int)) \
+ PyArray_API[200])
+#define _PyArray_SigintHandler \
+ (*(void (*)(int)) \
+ PyArray_API[201])
+#define _PyArray_GetSigintBuf \
+ (*(void* (*)(void)) \
+ PyArray_API[202])
+#define PyArray_DescrAlignConverter \
+ (*(int (*)(PyObject *, PyArray_Descr **)) \
+ PyArray_API[203])
+#define PyArray_DescrAlignConverter2 \
+ (*(int (*)(PyObject *, PyArray_Descr **)) \
+ PyArray_API[204])
+#define PyArray_SearchsideConverter \
+ (*(int (*)(PyObject *, void *)) \
+ PyArray_API[205])
+#define PyArray_CheckAxis \
+ (*(PyObject * (*)(PyArrayObject *, int *, int)) \
+ PyArray_API[206])
+#define PyArray_OverflowMultiplyList \
+ (*(npy_intp (*)(npy_intp *, int)) \
+ PyArray_API[207])
+#define PyArray_CompareString \
+ (*(int (*)(char *, char *, size_t)) \
+ PyArray_API[208])
+#define PyArray_MultiIterFromObjects \
+ (*(PyObject * (*)(PyObject **, int, int, ...)) \
+ PyArray_API[209])
+#define PyArray_GetEndianness \
+ (*(int (*)(void)) \
+ PyArray_API[210])
+#define PyArray_GetNDArrayCFeatureVersion \
+ (*(unsigned int (*)(void)) \
+ PyArray_API[211])
+#define PyArray_Correlate2 \
+ (*(PyObject * (*)(PyObject *, PyObject *, int)) \
+ PyArray_API[212])
+#define PyArray_NeighborhoodIterNew \
+ (*(PyObject* (*)(PyArrayIterObject *, npy_intp *, int, PyArrayObject*)) \
+ PyArray_API[213])
+#define PyTimeIntegerArrType_Type (*(PyTypeObject *)PyArray_API[214])
+#define PyDatetimeArrType_Type (*(PyTypeObject *)PyArray_API[215])
+#define PyTimedeltaArrType_Type (*(PyTypeObject *)PyArray_API[216])
+#define PyHalfArrType_Type (*(PyTypeObject *)PyArray_API[217])
+#define NpyIter_Type (*(PyTypeObject *)PyArray_API[218])
+#define PyArray_SetDatetimeParseFunction \
+ (*(void (*)(PyObject *)) \
+ PyArray_API[219])
+#define PyArray_DatetimeToDatetimeStruct \
+ (*(void (*)(npy_datetime, NPY_DATETIMEUNIT, npy_datetimestruct *)) \
+ PyArray_API[220])
+#define PyArray_TimedeltaToTimedeltaStruct \
+ (*(void (*)(npy_timedelta, NPY_DATETIMEUNIT, npy_timedeltastruct *)) \
+ PyArray_API[221])
+#define PyArray_DatetimeStructToDatetime \
+ (*(npy_datetime (*)(NPY_DATETIMEUNIT, npy_datetimestruct *)) \
+ PyArray_API[222])
+#define PyArray_TimedeltaStructToTimedelta \
+ (*(npy_datetime (*)(NPY_DATETIMEUNIT, npy_timedeltastruct *)) \
+ PyArray_API[223])
+#define NpyIter_New \
+ (*(NpyIter * (*)(PyArrayObject *, npy_uint32, NPY_ORDER, NPY_CASTING, PyArray_Descr*)) \
+ PyArray_API[224])
+#define NpyIter_MultiNew \
+ (*(NpyIter * (*)(int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **)) \
+ PyArray_API[225])
+#define NpyIter_AdvancedNew \
+ (*(NpyIter * (*)(int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **, int, int **, npy_intp *, npy_intp)) \
+ PyArray_API[226])
+#define NpyIter_Copy \
+ (*(NpyIter * (*)(NpyIter *)) \
+ PyArray_API[227])
+#define NpyIter_Deallocate \
+ (*(int (*)(NpyIter *)) \
+ PyArray_API[228])
+#define NpyIter_HasDelayedBufAlloc \
+ (*(npy_bool (*)(NpyIter *)) \
+ PyArray_API[229])
+#define NpyIter_HasExternalLoop \
+ (*(npy_bool (*)(NpyIter *)) \
+ PyArray_API[230])
+#define NpyIter_EnableExternalLoop \
+ (*(int (*)(NpyIter *)) \
+ PyArray_API[231])
+#define NpyIter_GetInnerStrideArray \
+ (*(npy_intp * (*)(NpyIter *)) \
+ PyArray_API[232])
+#define NpyIter_GetInnerLoopSizePtr \
+ (*(npy_intp * (*)(NpyIter *)) \
+ PyArray_API[233])
+#define NpyIter_Reset \
+ (*(int (*)(NpyIter *, char **)) \
+ PyArray_API[234])
+#define NpyIter_ResetBasePointers \
+ (*(int (*)(NpyIter *, char **, char **)) \
+ PyArray_API[235])
+#define NpyIter_ResetToIterIndexRange \
+ (*(int (*)(NpyIter *, npy_intp, npy_intp, char **)) \
+ PyArray_API[236])
+#define NpyIter_GetNDim \
+ (*(int (*)(NpyIter *)) \
+ PyArray_API[237])
+#define NpyIter_GetNOp \
+ (*(int (*)(NpyIter *)) \
+ PyArray_API[238])
+#define NpyIter_GetIterNext \
+ (*(NpyIter_IterNextFunc * (*)(NpyIter *, char **)) \
+ PyArray_API[239])
+#define NpyIter_GetIterSize \
+ (*(npy_intp (*)(NpyIter *)) \
+ PyArray_API[240])
+#define NpyIter_GetIterIndexRange \
+ (*(void (*)(NpyIter *, npy_intp *, npy_intp *)) \
+ PyArray_API[241])
+#define NpyIter_GetIterIndex \
+ (*(npy_intp (*)(NpyIter *)) \
+ PyArray_API[242])
+#define NpyIter_GotoIterIndex \
+ (*(int (*)(NpyIter *, npy_intp)) \
+ PyArray_API[243])
+#define NpyIter_HasMultiIndex \
+ (*(npy_bool (*)(NpyIter *)) \
+ PyArray_API[244])
+#define NpyIter_GetShape \
+ (*(int (*)(NpyIter *, npy_intp *)) \
+ PyArray_API[245])
+#define NpyIter_GetGetMultiIndex \
+ (*(NpyIter_GetMultiIndexFunc * (*)(NpyIter *, char **)) \
+ PyArray_API[246])
+#define NpyIter_GotoMultiIndex \
+ (*(int (*)(NpyIter *, npy_intp *)) \
+ PyArray_API[247])
+#define NpyIter_RemoveMultiIndex \
+ (*(int (*)(NpyIter *)) \
+ PyArray_API[248])
+#define NpyIter_HasIndex \
+ (*(npy_bool (*)(NpyIter *)) \
+ PyArray_API[249])
+#define NpyIter_IsBuffered \
+ (*(npy_bool (*)(NpyIter *)) \
+ PyArray_API[250])
+#define NpyIter_IsGrowInner \
+ (*(npy_bool (*)(NpyIter *)) \
+ PyArray_API[251])
+#define NpyIter_GetBufferSize \
+ (*(npy_intp (*)(NpyIter *)) \
+ PyArray_API[252])
+#define NpyIter_GetIndexPtr \
+ (*(npy_intp * (*)(NpyIter *)) \
+ PyArray_API[253])
+#define NpyIter_GotoIndex \
+ (*(int (*)(NpyIter *, npy_intp)) \
+ PyArray_API[254])
+#define NpyIter_GetDataPtrArray \
+ (*(char ** (*)(NpyIter *)) \
+ PyArray_API[255])
+#define NpyIter_GetDescrArray \
+ (*(PyArray_Descr ** (*)(NpyIter *)) \
+ PyArray_API[256])
+#define NpyIter_GetOperandArray \
+ (*(PyArrayObject ** (*)(NpyIter *)) \
+ PyArray_API[257])
+#define NpyIter_GetIterView \
+ (*(PyArrayObject * (*)(NpyIter *, npy_intp)) \
+ PyArray_API[258])
+#define NpyIter_GetReadFlags \
+ (*(void (*)(NpyIter *, char *)) \
+ PyArray_API[259])
+#define NpyIter_GetWriteFlags \
+ (*(void (*)(NpyIter *, char *)) \
+ PyArray_API[260])
+#define NpyIter_DebugPrint \
+ (*(void (*)(NpyIter *)) \
+ PyArray_API[261])
+#define NpyIter_IterationNeedsAPI \
+ (*(npy_bool (*)(NpyIter *)) \
+ PyArray_API[262])
+#define NpyIter_GetInnerFixedStrideArray \
+ (*(void (*)(NpyIter *, npy_intp *)) \
+ PyArray_API[263])
+#define NpyIter_RemoveAxis \
+ (*(int (*)(NpyIter *, int)) \
+ PyArray_API[264])
+#define NpyIter_GetAxisStrideArray \
+ (*(npy_intp * (*)(NpyIter *, int)) \
+ PyArray_API[265])
+#define NpyIter_RequiresBuffering \
+ (*(npy_bool (*)(NpyIter *)) \
+ PyArray_API[266])
+#define NpyIter_GetInitialDataPtrArray \
+ (*(char ** (*)(NpyIter *)) \
+ PyArray_API[267])
+#define NpyIter_CreateCompatibleStrides \
+ (*(int (*)(NpyIter *, npy_intp, npy_intp *)) \
+ PyArray_API[268])
+#define PyArray_CastingConverter \
+ (*(int (*)(PyObject *, NPY_CASTING *)) \
+ PyArray_API[269])
+#define PyArray_CountNonzero \
+ (*(npy_intp (*)(PyArrayObject *)) \
+ PyArray_API[270])
+#define PyArray_PromoteTypes \
+ (*(PyArray_Descr * (*)(PyArray_Descr *, PyArray_Descr *)) \
+ PyArray_API[271])
+#define PyArray_MinScalarType \
+ (*(PyArray_Descr * (*)(PyArrayObject *)) \
+ PyArray_API[272])
+#define PyArray_ResultType \
+ (*(PyArray_Descr * (*)(npy_intp, PyArrayObject **, npy_intp, PyArray_Descr **)) \
+ PyArray_API[273])
+#define PyArray_CanCastArrayTo \
+ (*(npy_bool (*)(PyArrayObject *, PyArray_Descr *, NPY_CASTING)) \
+ PyArray_API[274])
+#define PyArray_CanCastTypeTo \
+ (*(npy_bool (*)(PyArray_Descr *, PyArray_Descr *, NPY_CASTING)) \
+ PyArray_API[275])
+#define PyArray_EinsteinSum \
+ (*(PyArrayObject * (*)(char *, npy_intp, PyArrayObject **, PyArray_Descr *, NPY_ORDER, NPY_CASTING, PyArrayObject *)) \
+ PyArray_API[276])
+#define PyArray_NewLikeArray \
+ (*(PyObject * (*)(PyArrayObject *, NPY_ORDER, PyArray_Descr *, int)) \
+ PyArray_API[277])
+#define PyArray_GetArrayParamsFromObject \
+ (*(int (*)(PyObject *, PyArray_Descr *, npy_bool, PyArray_Descr **, int *, npy_intp *, PyArrayObject **, PyObject *)) \
+ PyArray_API[278])
+#define PyArray_ConvertClipmodeSequence \
+ (*(int (*)(PyObject *, NPY_CLIPMODE *, int)) \
+ PyArray_API[279])
+#define PyArray_MatrixProduct2 \
+ (*(PyObject * (*)(PyObject *, PyObject *, PyArrayObject*)) \
+ PyArray_API[280])
+#define NpyIter_IsFirstVisit \
+ (*(npy_bool (*)(NpyIter *, int)) \
+ PyArray_API[281])
+#define PyArray_SetBaseObject \
+ (*(int (*)(PyArrayObject *, PyObject *)) \
+ PyArray_API[282])
+#define PyArray_CreateSortedStridePerm \
+ (*(void (*)(int, npy_intp *, npy_stride_sort_item *)) \
+ PyArray_API[283])
+#define PyArray_RemoveAxesInPlace \
+ (*(void (*)(PyArrayObject *, npy_bool *)) \
+ PyArray_API[284])
+#define PyArray_DebugPrint \
+ (*(void (*)(PyArrayObject *)) \
+ PyArray_API[285])
+#define PyArray_FailUnlessWriteable \
+ (*(int (*)(PyArrayObject *, const char *)) \
+ PyArray_API[286])
+#define PyArray_SetUpdateIfCopyBase \
+ (*(int (*)(PyArrayObject *, PyArrayObject *)) \
+ PyArray_API[287])
+#define PyDataMem_NEW \
+ (*(void * (*)(size_t)) \
+ PyArray_API[288])
+#define PyDataMem_FREE \
+ (*(void (*)(void *)) \
+ PyArray_API[289])
+#define PyDataMem_RENEW \
+ (*(void * (*)(void *, size_t)) \
+ PyArray_API[290])
+#define PyDataMem_SetEventHook \
+ (*(PyDataMem_EventHookFunc * (*)(PyDataMem_EventHookFunc *, void *, void **)) \
+ PyArray_API[291])
+#define NPY_DEFAULT_ASSIGN_CASTING (*(NPY_CASTING *)PyArray_API[292])
+#define PyArray_MapIterSwapAxes \
+ (*(void (*)(PyArrayMapIterObject *, PyArrayObject **, int)) \
+ PyArray_API[293])
+#define PyArray_MapIterArray \
+ (*(PyObject * (*)(PyArrayObject *, PyObject *)) \
+ PyArray_API[294])
+#define PyArray_MapIterNext \
+ (*(void (*)(PyArrayMapIterObject *)) \
+ PyArray_API[295])
+#define PyArray_Partition \
+ (*(int (*)(PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND)) \
+ PyArray_API[296])
+#define PyArray_ArgPartition \
+ (*(PyObject * (*)(PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND)) \
+ PyArray_API[297])
+#define PyArray_SelectkindConverter \
+ (*(int (*)(PyObject *, NPY_SELECTKIND *)) \
+ PyArray_API[298])
+#define PyDataMem_NEW_ZEROED \
+ (*(void * (*)(size_t, size_t)) \
+ PyArray_API[299])
+#define PyArray_CheckAnyScalarExact \
+ (*(int (*)(PyObject *)) \
+ PyArray_API[300])
+#define PyArray_MapIterArrayCopyIfOverlap \
+ (*(PyObject * (*)(PyArrayObject *, PyObject *, int, PyArrayObject *)) \
+ PyArray_API[301])
+#define PyArray_ResolveWritebackIfCopy \
+ (*(int (*)(PyArrayObject *)) \
+ PyArray_API[302])
+#define PyArray_SetWritebackIfCopyBase \
+ (*(int (*)(PyArrayObject *, PyArrayObject *)) \
+ PyArray_API[303])
+
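+/*
+ * _import_array() populates the table: it imports
+ * numpy.core._multiarray_umath, unpacks its _ARRAY_API attribute
+ * (a PyCapsule on Python 3, a PyCObject on Python 2) into
+ * PyArray_API, then sanity-checks ABI version, feature version and
+ * endianness against what this header was compiled with.
+ */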
+#if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT)
+static int
+_import_array(void)
+{
+ int st;
+ PyObject *numpy = PyImport_ImportModule("numpy.core._multiarray_umath");
+ PyObject *c_api = NULL;
+
+ if (numpy == NULL) {
+ return -1;
+ }
+ c_api = PyObject_GetAttrString(numpy, "_ARRAY_API");
+ Py_DECREF(numpy);
+ if (c_api == NULL) {
+ PyErr_SetString(PyExc_AttributeError, "_ARRAY_API not found");
+ return -1;
+ }
+
+#if PY_VERSION_HEX >= 0x03000000
+ if (!PyCapsule_CheckExact(c_api)) {
+ PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCapsule object");
+ Py_DECREF(c_api);
+ return -1;
+ }
+ PyArray_API = (void **)PyCapsule_GetPointer(c_api, NULL);
+#else
+ if (!PyCObject_Check(c_api)) {
+ PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCObject object");
+ Py_DECREF(c_api);
+ return -1;
+ }
+ PyArray_API = (void **)PyCObject_AsVoidPtr(c_api);
+#endif
+ Py_DECREF(c_api);
+ if (PyArray_API == NULL) {
+ PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is NULL pointer");
+ return -1;
+ }
+
+ /* Perform runtime check of C API version */
+ if (NPY_VERSION != PyArray_GetNDArrayCVersion()) {
+ PyErr_Format(PyExc_RuntimeError, "module compiled against "\
+ "ABI version 0x%x but this version of numpy is 0x%x", \
+ (int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion());
+ return -1;
+ }
+ if (NPY_FEATURE_VERSION > PyArray_GetNDArrayCFeatureVersion()) {
+ PyErr_Format(PyExc_RuntimeError, "module compiled against "\
+ "API version 0x%x but this version of numpy is 0x%x", \
+ (int) NPY_FEATURE_VERSION, (int) PyArray_GetNDArrayCFeatureVersion());
+ return -1;
+ }
+
+ /*
+ * Perform runtime check of endianness and check it matches the one set by
+ * the headers (npy_endian.h) as a safeguard
+ */
+ st = PyArray_GetEndianness();
+ if (st == NPY_CPU_UNKNOWN_ENDIAN) {
+ PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as unknown endian");
+ return -1;
+ }
+#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN
+ if (st != NPY_CPU_BIG) {
+ PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\
+ "big endian, but detected different endianness at runtime");
+ return -1;
+ }
+#elif NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN
+ if (st != NPY_CPU_LITTLE) {
+ PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\
+ "little endian, but detected different endianness at runtime");
+ return -1;
+ }
+#endif
+
+ return 0;
+}
+
+#if PY_VERSION_HEX >= 0x03000000
+#define NUMPY_IMPORT_ARRAY_RETVAL NULL
+#else
+#define NUMPY_IMPORT_ARRAY_RETVAL
+#endif
+
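+/*
+ * Every extension module must invoke import_array() (or one of the
+ * variants below) in its init function before touching any PyArray_*
+ * API; on failure the macro sets ImportError and returns from the
+ * enclosing function.
+ */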
+#define import_array() {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return NUMPY_IMPORT_ARRAY_RETVAL; } }
+
+#define import_array1(ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return ret; } }
+
+#define import_array2(msg, ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, msg); return ret; } }
+
+#endif
+
+#endif
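Taken together, the two halves of this generated header are NumPy's cross-module C API: an extension never links against a NumPy library; every PyArray_* call goes through the PyArray_API pointer table fetched at import time. A minimal consumer sketch (illustrative only -- the module name "demo" and the function "make_range" are invented here, written in the Python 2.7 style this venv targets):

#include <Python.h>
#include <numpy/arrayobject.h>

/* arange(0, 10) built through the C API; per the table above,
 * PyArray_Arange dereferences PyArray_API[186]. */
static PyObject *
make_range(PyObject *self, PyObject *args)
{
    return PyArray_Arange(0.0, 10.0, 1.0, NPY_DOUBLE);
}

static PyMethodDef demo_methods[] = {
    {"make_range", make_range, METH_NOARGS, "return np.arange(0, 10)"},
    {NULL, NULL, 0, NULL}
};

PyMODINIT_FUNC
initdemo(void)
{
    (void) Py_InitModule("demo", demo_methods);
    import_array();   /* fills PyArray_API from _ARRAY_API; returns on failure */
}

Compiled as a shared module, import demo; demo.make_range() should return array([0., 1., ..., 9.]), provided the installed NumPy passes the ABI, feature-version and endianness checks in _import_array() above.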
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/__ufunc_api.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/__ufunc_api.h
new file mode 100644
index 0000000..22d2ba1
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/__ufunc_api.h
@@ -0,0 +1,326 @@
+
+#ifdef _UMATHMODULE
+
+extern NPY_NO_EXPORT PyTypeObject PyUFunc_Type;
+
+NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndData \
+ (PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int);
+NPY_NO_EXPORT int PyUFunc_RegisterLoopForType \
+ (PyUFuncObject *, int, PyUFuncGenericFunction, int *, void *);
+NPY_NO_EXPORT int PyUFunc_GenericFunction \
+ (PyUFuncObject *, PyObject *, PyObject *, PyArrayObject **);
+NPY_NO_EXPORT void PyUFunc_f_f_As_d_d \
+ (char **, npy_intp *, npy_intp *, void *);
+NPY_NO_EXPORT void PyUFunc_d_d \
+ (char **, npy_intp *, npy_intp *, void *);
+NPY_NO_EXPORT void PyUFunc_f_f \
+ (char **, npy_intp *, npy_intp *, void *);
+NPY_NO_EXPORT void PyUFunc_g_g \
+ (char **, npy_intp *, npy_intp *, void *);
+NPY_NO_EXPORT void PyUFunc_F_F_As_D_D \
+ (char **, npy_intp *, npy_intp *, void *);
+NPY_NO_EXPORT void PyUFunc_F_F \
+ (char **, npy_intp *, npy_intp *, void *);
+NPY_NO_EXPORT void PyUFunc_D_D \
+ (char **, npy_intp *, npy_intp *, void *);
+NPY_NO_EXPORT void PyUFunc_G_G \
+ (char **, npy_intp *, npy_intp *, void *);
+NPY_NO_EXPORT void PyUFunc_O_O \
+ (char **, npy_intp *, npy_intp *, void *);
+NPY_NO_EXPORT void PyUFunc_ff_f_As_dd_d \
+ (char **, npy_intp *, npy_intp *, void *);
+NPY_NO_EXPORT void PyUFunc_ff_f \
+ (char **, npy_intp *, npy_intp *, void *);
+NPY_NO_EXPORT void PyUFunc_dd_d \
+ (char **, npy_intp *, npy_intp *, void *);
+NPY_NO_EXPORT void PyUFunc_gg_g \
+ (char **, npy_intp *, npy_intp *, void *);
+NPY_NO_EXPORT void PyUFunc_FF_F_As_DD_D \
+ (char **, npy_intp *, npy_intp *, void *);
+NPY_NO_EXPORT void PyUFunc_DD_D \
+ (char **, npy_intp *, npy_intp *, void *);
+NPY_NO_EXPORT void PyUFunc_FF_F \
+ (char **, npy_intp *, npy_intp *, void *);
+NPY_NO_EXPORT void PyUFunc_GG_G \
+ (char **, npy_intp *, npy_intp *, void *);
+NPY_NO_EXPORT void PyUFunc_OO_O \
+ (char **, npy_intp *, npy_intp *, void *);
+NPY_NO_EXPORT void PyUFunc_O_O_method \
+ (char **, npy_intp *, npy_intp *, void *);
+NPY_NO_EXPORT void PyUFunc_OO_O_method \
+ (char **, npy_intp *, npy_intp *, void *);
+NPY_NO_EXPORT void PyUFunc_On_Om \
+ (char **, npy_intp *, npy_intp *, void *);
+NPY_NO_EXPORT int PyUFunc_GetPyValues \
+ (char *, int *, int *, PyObject **);
+NPY_NO_EXPORT int PyUFunc_checkfperr \
+ (int, PyObject *, int *);
+NPY_NO_EXPORT void PyUFunc_clearfperr \
+ (void);
+NPY_NO_EXPORT int PyUFunc_getfperr \
+ (void);
+NPY_NO_EXPORT int PyUFunc_handlefperr \
+ (int, PyObject *, int, int *);
+NPY_NO_EXPORT int PyUFunc_ReplaceLoopBySignature \
+ (PyUFuncObject *, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *);
+NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndDataAndSignature \
+ (PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int, const char *);
+NPY_NO_EXPORT int PyUFunc_SetUsesArraysAsData \
+ (void **, size_t);
+NPY_NO_EXPORT void PyUFunc_e_e \
+ (char **, npy_intp *, npy_intp *, void *);
+NPY_NO_EXPORT void PyUFunc_e_e_As_f_f \
+ (char **, npy_intp *, npy_intp *, void *);
+NPY_NO_EXPORT void PyUFunc_e_e_As_d_d \
+ (char **, npy_intp *, npy_intp *, void *);
+NPY_NO_EXPORT void PyUFunc_ee_e \
+ (char **, npy_intp *, npy_intp *, void *);
+NPY_NO_EXPORT void PyUFunc_ee_e_As_ff_f \
+ (char **, npy_intp *, npy_intp *, void *);
+NPY_NO_EXPORT void PyUFunc_ee_e_As_dd_d \
+ (char **, npy_intp *, npy_intp *, void *);
+NPY_NO_EXPORT int PyUFunc_DefaultTypeResolver \
+ (PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyObject *, PyArray_Descr **);
+NPY_NO_EXPORT int PyUFunc_ValidateCasting \
+ (PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyArray_Descr **);
+NPY_NO_EXPORT int PyUFunc_RegisterLoopForDescr \
+ (PyUFuncObject *, PyArray_Descr *, PyUFuncGenericFunction, PyArray_Descr **, void *);
+NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndDataAndSignatureAndIdentity \
+ (PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int, const char *, PyObject *);
+
+#else
+
+#if defined(PY_UFUNC_UNIQUE_SYMBOL)
+#define PyUFunc_API PY_UFUNC_UNIQUE_SYMBOL
+#endif
+
+#if defined(NO_IMPORT) || defined(NO_IMPORT_UFUNC)
+extern void **PyUFunc_API;
+#else
+#if defined(PY_UFUNC_UNIQUE_SYMBOL)
+void **PyUFunc_API;
+#else
+static void **PyUFunc_API=NULL;
+#endif
+#endif
+
+#define PyUFunc_Type (*(PyTypeObject *)PyUFunc_API[0])
+#define PyUFunc_FromFuncAndData \
+ (*(PyObject * (*)(PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int)) \
+ PyUFunc_API[1])
+#define PyUFunc_RegisterLoopForType \
+ (*(int (*)(PyUFuncObject *, int, PyUFuncGenericFunction, int *, void *)) \
+ PyUFunc_API[2])
+#define PyUFunc_GenericFunction \
+ (*(int (*)(PyUFuncObject *, PyObject *, PyObject *, PyArrayObject **)) \
+ PyUFunc_API[3])
+#define PyUFunc_f_f_As_d_d \
+ (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
+ PyUFunc_API[4])
+#define PyUFunc_d_d \
+ (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
+ PyUFunc_API[5])
+#define PyUFunc_f_f \
+ (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
+ PyUFunc_API[6])
+#define PyUFunc_g_g \
+ (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
+ PyUFunc_API[7])
+#define PyUFunc_F_F_As_D_D \
+ (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
+ PyUFunc_API[8])
+#define PyUFunc_F_F \
+ (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
+ PyUFunc_API[9])
+#define PyUFunc_D_D \
+ (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
+ PyUFunc_API[10])
+#define PyUFunc_G_G \
+ (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
+ PyUFunc_API[11])
+#define PyUFunc_O_O \
+ (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
+ PyUFunc_API[12])
+#define PyUFunc_ff_f_As_dd_d \
+ (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
+ PyUFunc_API[13])
+#define PyUFunc_ff_f \
+ (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
+ PyUFunc_API[14])
+#define PyUFunc_dd_d \
+ (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
+ PyUFunc_API[15])
+#define PyUFunc_gg_g \
+ (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
+ PyUFunc_API[16])
+#define PyUFunc_FF_F_As_DD_D \
+ (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
+ PyUFunc_API[17])
+#define PyUFunc_DD_D \
+ (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
+ PyUFunc_API[18])
+#define PyUFunc_FF_F \
+ (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
+ PyUFunc_API[19])
+#define PyUFunc_GG_G \
+ (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
+ PyUFunc_API[20])
+#define PyUFunc_OO_O \
+ (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
+ PyUFunc_API[21])
+#define PyUFunc_O_O_method \
+ (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
+ PyUFunc_API[22])
+#define PyUFunc_OO_O_method \
+ (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
+ PyUFunc_API[23])
+#define PyUFunc_On_Om \
+ (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
+ PyUFunc_API[24])
+#define PyUFunc_GetPyValues \
+ (*(int (*)(char *, int *, int *, PyObject **)) \
+ PyUFunc_API[25])
+#define PyUFunc_checkfperr \
+ (*(int (*)(int, PyObject *, int *)) \
+ PyUFunc_API[26])
+#define PyUFunc_clearfperr \
+ (*(void (*)(void)) \
+ PyUFunc_API[27])
+#define PyUFunc_getfperr \
+ (*(int (*)(void)) \
+ PyUFunc_API[28])
+#define PyUFunc_handlefperr \
+ (*(int (*)(int, PyObject *, int, int *)) \
+ PyUFunc_API[29])
+#define PyUFunc_ReplaceLoopBySignature \
+ (*(int (*)(PyUFuncObject *, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *)) \
+ PyUFunc_API[30])
+#define PyUFunc_FromFuncAndDataAndSignature \
+ (*(PyObject * (*)(PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int, const char *)) \
+ PyUFunc_API[31])
+#define PyUFunc_SetUsesArraysAsData \
+ (*(int (*)(void **, size_t)) \
+ PyUFunc_API[32])
+#define PyUFunc_e_e \
+ (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
+ PyUFunc_API[33])
+#define PyUFunc_e_e_As_f_f \
+ (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
+ PyUFunc_API[34])
+#define PyUFunc_e_e_As_d_d \
+ (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
+ PyUFunc_API[35])
+#define PyUFunc_ee_e \
+ (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
+ PyUFunc_API[36])
+#define PyUFunc_ee_e_As_ff_f \
+ (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
+ PyUFunc_API[37])
+#define PyUFunc_ee_e_As_dd_d \
+ (*(void (*)(char **, npy_intp *, npy_intp *, void *)) \
+ PyUFunc_API[38])
+#define PyUFunc_DefaultTypeResolver \
+ (*(int (*)(PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyObject *, PyArray_Descr **)) \
+ PyUFunc_API[39])
+#define PyUFunc_ValidateCasting \
+ (*(int (*)(PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyArray_Descr **)) \
+ PyUFunc_API[40])
+#define PyUFunc_RegisterLoopForDescr \
+ (*(int (*)(PyUFuncObject *, PyArray_Descr *, PyUFuncGenericFunction, PyArray_Descr **, void *)) \
+ PyUFunc_API[41])
+#define PyUFunc_FromFuncAndDataAndSignatureAndIdentity \
+ (*(PyObject * (*)(PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int, const char *, PyObject *)) \
+ PyUFunc_API[42])
+
+static NPY_INLINE int
+_import_umath(void)
+{
+ PyObject *numpy = PyImport_ImportModule("numpy.core._multiarray_umath");
+ PyObject *c_api = NULL;
+
+ if (numpy == NULL) {
+ PyErr_SetString(PyExc_ImportError,
+ "numpy.core._multiarray_umath failed to import");
+ return -1;
+ }
+ c_api = PyObject_GetAttrString(numpy, "_UFUNC_API");
+ Py_DECREF(numpy);
+ if (c_api == NULL) {
+ PyErr_SetString(PyExc_AttributeError, "_UFUNC_API not found");
+ return -1;
+ }
+
+#if PY_VERSION_HEX >= 0x03000000
+ if (!PyCapsule_CheckExact(c_api)) {
+ PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is not PyCapsule object");
+ Py_DECREF(c_api);
+ return -1;
+ }
+ PyUFunc_API = (void **)PyCapsule_GetPointer(c_api, NULL);
+#else
+ if (!PyCObject_Check(c_api)) {
+ PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is not PyCObject object");
+ Py_DECREF(c_api);
+ return -1;
+ }
+ PyUFunc_API = (void **)PyCObject_AsVoidPtr(c_api);
+#endif
+ Py_DECREF(c_api);
+ if (PyUFunc_API == NULL) {
+ PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is NULL pointer");
+ return -1;
+ }
+ return 0;
+}
+
+#if PY_VERSION_HEX >= 0x03000000
+#define NUMPY_IMPORT_UMATH_RETVAL NULL
+#else
+#define NUMPY_IMPORT_UMATH_RETVAL
+#endif
+
+#define import_umath() \
+ do {\
+ UFUNC_NOFPE\
+ if (_import_umath() < 0) {\
+ PyErr_Print();\
+ PyErr_SetString(PyExc_ImportError,\
+ "numpy.core.umath failed to import");\
+ return NUMPY_IMPORT_UMATH_RETVAL;\
+ }\
+ } while(0)
+
+#define import_umath1(ret) \
+ do {\
+ UFUNC_NOFPE\
+ if (_import_umath() < 0) {\
+ PyErr_Print();\
+ PyErr_SetString(PyExc_ImportError,\
+ "numpy.core.umath failed to import");\
+ return ret;\
+ }\
+ } while(0)
+
+#define import_umath2(ret, msg) \
+ do {\
+ UFUNC_NOFPE\
+ if (_import_umath() < 0) {\
+ PyErr_Print();\
+ PyErr_SetString(PyExc_ImportError, msg);\
+ return ret;\
+ }\
+ } while(0)
+
+#define import_ufunc() \
+ do {\
+ UFUNC_NOFPE\
+ if (_import_umath() < 0) {\
+ PyErr_Print();\
+ PyErr_SetString(PyExc_ImportError,\
+ "numpy.core.umath failed to import");\
+ }\
+ } while(0)
+
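+/*
+ * A minimal sketch of typical use (illustration only; the module name
+ * "example" and its method table are hypothetical): a Python 2 extension
+ * calls import_umath() once during module initialization, before using
+ * any PyUFunc_* API.
+ */
+#if 0
+#include <Python.h>
+#include "numpy/arrayobject.h"
+#include "numpy/ufuncobject.h"
+
+static PyMethodDef example_methods[] = {
+    {NULL, NULL, 0, NULL}
+};
+
+PyMODINIT_FUNC
+initexample(void)
+{
+    PyObject *m = Py_InitModule("example", example_methods);
+    if (m == NULL) {
+        return;
+    }
+    import_array();   /* multiarray C API; the macro returns on failure */
+    import_umath();   /* ufunc C API; likewise returns on failure */
+}
+#endif
+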
+#endif
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/_neighborhood_iterator_imp.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/_neighborhood_iterator_imp.h
new file mode 100644
index 0000000..e8860cb
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/_neighborhood_iterator_imp.h
@@ -0,0 +1,90 @@
+#ifndef _NPY_INCLUDE_NEIGHBORHOOD_IMP
+#error You should not include this header directly
+#endif
+/*
+ * Private API (here for inline)
+ */
+static NPY_INLINE int
+_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter);
+
+/*
+ * Update to next item of the iterator
+ *
+ * Note: this simply increments the coordinates vector, last dimension
+ * incremented first, i.e., for dimension 3
+ * ...
+ * -1, -1, -1
+ * -1, -1, 0
+ * -1, -1, 1
+ * ....
+ * -1, 0, -1
+ * -1, 0, 0
+ * ....
+ * 0, -1, -1
+ * 0, -1, 0
+ * ....
+ */
+#define _UPDATE_COORD_ITER(c) \
+ wb = iter->coordinates[c] < iter->bounds[c][1]; \
+ if (wb) { \
+ iter->coordinates[c] += 1; \
+ return 0; \
+ } \
+ else { \
+ iter->coordinates[c] = iter->bounds[c][0]; \
+ }
+
+static NPY_INLINE int
+_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter)
+{
+ npy_intp i, wb;
+
+ for (i = iter->nd - 1; i >= 0; --i) {
+ _UPDATE_COORD_ITER(i)
+ }
+
+ return 0;
+}
+
+/*
+ * Version optimized for 2d arrays, manual loop unrolling
+ */
+static NPY_INLINE int
+_PyArrayNeighborhoodIter_IncrCoord2D(PyArrayNeighborhoodIterObject* iter)
+{
+ npy_intp wb;
+
+ _UPDATE_COORD_ITER(1)
+ _UPDATE_COORD_ITER(0)
+
+ return 0;
+}
+#undef _UPDATE_COORD_ITER
+
+/*
+ * Advance to the next neighbour
+ */
+static NPY_INLINE int
+PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter)
+{
+ _PyArrayNeighborhoodIter_IncrCoord (iter);
+ iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates);
+
+ return 0;
+}
+
+/*
+ * Reset functions
+ */
+static NPY_INLINE int
+PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter)
+{
+ npy_intp i;
+
+ for (i = 0; i < iter->nd; ++i) {
+ iter->coordinates[i] = iter->bounds[i][0];
+ }
+ iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates);
+
+ return 0;
+}
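+
+/*
+ * A condensed usage sketch (illustration only; 'x' is a hypothetical 2-D
+ * PyArrayObject): an outer iterator walks the array while a nested
+ * neighborhood iterator walks a 3x3 window around each element.
+ */
+#if 0
+PyArrayIterObject *iter =
+        (PyArrayIterObject *)PyArray_IterNew((PyObject *)x);
+npy_intp bounds[] = {-1, 1, -1, 1};   /* a 3x3 neighborhood */
+PyArrayNeighborhoodIterObject *neigh =
+        (PyArrayNeighborhoodIterObject *)PyArray_NeighborhoodIterNew(
+                iter, bounds, NPY_NEIGHBORHOOD_ITER_ZERO_PADDING, NULL);
+npy_intp i, j;
+for (i = 0; i < iter->size; ++i) {
+    for (j = 0; j < neigh->size; ++j) {
+        /* neigh->dataptr points at the current neighborhood item */
+        PyArrayNeighborhoodIter_Next(neigh);
+    }
+    PyArray_ITER_NEXT(iter);
+    PyArrayNeighborhoodIter_Reset(neigh);
+}
+#endif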
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/_numpyconfig.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/_numpyconfig.h
new file mode 100644
index 0000000..edb7e37
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/_numpyconfig.h
@@ -0,0 +1,32 @@
+#define NPY_HAVE_ENDIAN_H 1
+#define NPY_SIZEOF_SHORT SIZEOF_SHORT
+#define NPY_SIZEOF_INT SIZEOF_INT
+#define NPY_SIZEOF_LONG SIZEOF_LONG
+#define NPY_SIZEOF_FLOAT 4
+#define NPY_SIZEOF_COMPLEX_FLOAT 8
+#define NPY_SIZEOF_DOUBLE 8
+#define NPY_SIZEOF_COMPLEX_DOUBLE 16
+#define NPY_SIZEOF_LONGDOUBLE 16
+#define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32
+#define NPY_SIZEOF_PY_INTPTR_T 8
+#define NPY_SIZEOF_OFF_T 8
+#define NPY_SIZEOF_PY_LONG_LONG 8
+#define NPY_SIZEOF_LONGLONG 8
+#define NPY_NO_SMP 0
+#define NPY_HAVE_DECL_ISNAN
+#define NPY_HAVE_DECL_ISINF
+#define NPY_HAVE_DECL_ISFINITE
+#define NPY_HAVE_DECL_SIGNBIT
+#define NPY_USE_C99_COMPLEX 1
+#define NPY_HAVE_COMPLEX_DOUBLE 1
+#define NPY_HAVE_COMPLEX_FLOAT 1
+#define NPY_HAVE_COMPLEX_LONG_DOUBLE 1
+#define NPY_RELAXED_STRIDES_CHECKING 1
+#define NPY_USE_C99_FORMATS 1
+#define NPY_VISIBILITY_HIDDEN __attribute__((visibility("hidden")))
+#define NPY_ABI_VERSION 0x01000009
+#define NPY_API_VERSION 0x0000000D
+
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS 1
+#endif
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/arrayobject.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/arrayobject.h
new file mode 100644
index 0000000..4f46d6b
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/arrayobject.h
@@ -0,0 +1,11 @@
+#ifndef Py_ARRAYOBJECT_H
+#define Py_ARRAYOBJECT_H
+
+#include "ndarrayobject.h"
+#include "npy_interrupt.h"
+
+#ifdef NPY_NO_PREFIX
+#include "noprefix.h"
+#endif
+
+#endif
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/arrayscalars.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/arrayscalars.h
new file mode 100644
index 0000000..64450e7
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/arrayscalars.h
@@ -0,0 +1,175 @@
+#ifndef _NPY_ARRAYSCALARS_H_
+#define _NPY_ARRAYSCALARS_H_
+
+#ifndef _MULTIARRAYMODULE
+typedef struct {
+ PyObject_HEAD
+ npy_bool obval;
+} PyBoolScalarObject;
+#endif
+
+
+typedef struct {
+ PyObject_HEAD
+ signed char obval;
+} PyByteScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ short obval;
+} PyShortScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ int obval;
+} PyIntScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ long obval;
+} PyLongScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ npy_longlong obval;
+} PyLongLongScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ unsigned char obval;
+} PyUByteScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ unsigned short obval;
+} PyUShortScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ unsigned int obval;
+} PyUIntScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ unsigned long obval;
+} PyULongScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ npy_ulonglong obval;
+} PyULongLongScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ npy_half obval;
+} PyHalfScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ float obval;
+} PyFloatScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ double obval;
+} PyDoubleScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ npy_longdouble obval;
+} PyLongDoubleScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ npy_cfloat obval;
+} PyCFloatScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ npy_cdouble obval;
+} PyCDoubleScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ npy_clongdouble obval;
+} PyCLongDoubleScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ PyObject * obval;
+} PyObjectScalarObject;
+
+typedef struct {
+ PyObject_HEAD
+ npy_datetime obval;
+ PyArray_DatetimeMetaData obmeta;
+} PyDatetimeScalarObject;
+
+typedef struct {
+ PyObject_HEAD
+ npy_timedelta obval;
+ PyArray_DatetimeMetaData obmeta;
+} PyTimedeltaScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ char obval;
+} PyScalarObject;
+
+#define PyStringScalarObject PyStringObject
+#define PyUnicodeScalarObject PyUnicodeObject
+
+typedef struct {
+ PyObject_VAR_HEAD
+ char *obval;
+ PyArray_Descr *descr;
+ int flags;
+ PyObject *base;
+} PyVoidScalarObject;
+
+/* Macros
+ PyScalarObject
+ PyArrType_Type
+ are defined in ndarrayobject.h
+*/
+
+#define PyArrayScalar_False ((PyObject *)(&(_PyArrayScalar_BoolValues[0])))
+#define PyArrayScalar_True ((PyObject *)(&(_PyArrayScalar_BoolValues[1])))
+#define PyArrayScalar_FromLong(i) \
+ ((PyObject *)(&(_PyArrayScalar_BoolValues[((i)!=0)])))
+#define PyArrayScalar_RETURN_BOOL_FROM_LONG(i) \
+ return Py_INCREF(PyArrayScalar_FromLong(i)), \
+ PyArrayScalar_FromLong(i)
+#define PyArrayScalar_RETURN_FALSE \
+ return Py_INCREF(PyArrayScalar_False), \
+ PyArrayScalar_False
+#define PyArrayScalar_RETURN_TRUE \
+ return Py_INCREF(PyArrayScalar_True), \
+ PyArrayScalar_True
+
+#define PyArrayScalar_New(cls) \
+ Py##cls##ArrType_Type.tp_alloc(&Py##cls##ArrType_Type, 0)
+#define PyArrayScalar_VAL(obj, cls) \
+ ((Py##cls##ScalarObject *)obj)->obval
+#define PyArrayScalar_ASSIGN(obj, cls, val) \
+ PyArrayScalar_VAL(obj, cls) = val
+
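+/*
+ * A minimal sketch of the New/ASSIGN macros (illustration only): building
+ * a NumPy double scalar from a C double.
+ */
+#if 0
+static PyObject *
+make_double_scalar(double value)
+{
+    PyObject *obj = PyArrayScalar_New(Double);  /* PyDoubleArrType_Type alloc */
+    if (obj != NULL) {
+        PyArrayScalar_ASSIGN(obj, Double, value);
+    }
+    return obj;
+}
+#endif
+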
+#endif
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/halffloat.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/halffloat.h
new file mode 100644
index 0000000..ab0d221
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/halffloat.h
@@ -0,0 +1,70 @@
+#ifndef __NPY_HALFFLOAT_H__
+#define __NPY_HALFFLOAT_H__
+
+#include <Python.h>
+#include <numpy/npy_math.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Half-precision routines
+ */
+
+/* Conversions */
+float npy_half_to_float(npy_half h);
+double npy_half_to_double(npy_half h);
+npy_half npy_float_to_half(float f);
+npy_half npy_double_to_half(double d);
+/* Comparisons */
+int npy_half_eq(npy_half h1, npy_half h2);
+int npy_half_ne(npy_half h1, npy_half h2);
+int npy_half_le(npy_half h1, npy_half h2);
+int npy_half_lt(npy_half h1, npy_half h2);
+int npy_half_ge(npy_half h1, npy_half h2);
+int npy_half_gt(npy_half h1, npy_half h2);
+/* faster *_nonan variants for when you know h1 and h2 are not NaN */
+int npy_half_eq_nonan(npy_half h1, npy_half h2);
+int npy_half_lt_nonan(npy_half h1, npy_half h2);
+int npy_half_le_nonan(npy_half h1, npy_half h2);
+/* Miscellaneous functions */
+int npy_half_iszero(npy_half h);
+int npy_half_isnan(npy_half h);
+int npy_half_isinf(npy_half h);
+int npy_half_isfinite(npy_half h);
+int npy_half_signbit(npy_half h);
+npy_half npy_half_copysign(npy_half x, npy_half y);
+npy_half npy_half_spacing(npy_half h);
+npy_half npy_half_nextafter(npy_half x, npy_half y);
+npy_half npy_half_divmod(npy_half x, npy_half y, npy_half *modulus);
+
+/*
+ * Half-precision constants
+ */
+
+#define NPY_HALF_ZERO (0x0000u)
+#define NPY_HALF_PZERO (0x0000u)
+#define NPY_HALF_NZERO (0x8000u)
+#define NPY_HALF_ONE (0x3c00u)
+#define NPY_HALF_NEGONE (0xbc00u)
+#define NPY_HALF_PINF (0x7c00u)
+#define NPY_HALF_NINF (0xfc00u)
+#define NPY_HALF_NAN (0x7e00u)
+
+#define NPY_MAX_HALF (0x7bffu)
+
+/*
+ * Bit-level conversions
+ */
+
+npy_uint16 npy_floatbits_to_halfbits(npy_uint32 f);
+npy_uint16 npy_doublebits_to_halfbits(npy_uint64 d);
+npy_uint32 npy_halfbits_to_floatbits(npy_uint16 h);
+npy_uint64 npy_halfbits_to_doublebits(npy_uint16 h);
+
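+/*
+ * A tiny round-trip sketch (illustration only): npy_half is a raw bit
+ * pattern, so values move through the explicit conversion helpers.
+ */
+#if 0
+float f = 1.5f;                              /* exactly representable in half */
+npy_half h = npy_float_to_half(f);
+float back = npy_half_to_float(h);           /* back == 1.5f */
+int nan_flag = npy_half_isnan(NPY_HALF_NAN); /* nonzero */
+#endif
+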
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/multiarray_api.txt b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/multiarray_api.txt
new file mode 100644
index 0000000..7c45394
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/multiarray_api.txt
@@ -0,0 +1,2501 @@
+
+===========
+NumPy C-API
+===========
+::
+
+ unsigned int
+ PyArray_GetNDArrayCVersion(void )
+
+
+Included at the very first so it is not auto-grabbed and thus not labeled.
+
+::
+
+ int
+ PyArray_SetNumericOps(PyObject *dict)
+
+Set internal structure with number functions that all arrays will use
+
+::
+
+ PyObject *
+ PyArray_GetNumericOps(void )
+
+Get dictionary showing number functions that all arrays will use
+
+::
+
+ int
+ PyArray_INCREF(PyArrayObject *mp)
+
+For object arrays, increment all internal references.
+
+::
+
+ int
+ PyArray_XDECREF(PyArrayObject *mp)
+
+Decrement all internal references for object arrays.
+(or arrays with object fields)
+
+::
+
+ void
+ PyArray_SetStringFunction(PyObject *op, int repr)
+
+Set the array print function to be a Python function.
+
+::
+
+ PyArray_Descr *
+ PyArray_DescrFromType(int type)
+
+Get the PyArray_Descr structure for a type.
+
+::
+
+ PyObject *
+ PyArray_TypeObjectFromType(int type)
+
+Get a typeobject from a type-number -- can return NULL.
+
+New reference
+
+::
+
+ char *
+ PyArray_Zero(PyArrayObject *arr)
+
+Get pointer to zero of correct type for array.
+
+::
+
+ char *
+ PyArray_One(PyArrayObject *arr)
+
+Get pointer to one of correct type for array
+
+::
+
+ PyObject *
+ PyArray_CastToType(PyArrayObject *arr, PyArray_Descr *dtype, int
+ is_f_order)
+
+For backward compatibility
+
+Cast an array using typecode structure.
+steals reference to dtype --- cannot be NULL
+
+This function always makes a copy of arr, even if the dtype
+doesn't change.
+
+::
+
+ int
+ PyArray_CastTo(PyArrayObject *out, PyArrayObject *mp)
+
+Cast to an already created array.
+
+::
+
+ int
+ PyArray_CastAnyTo(PyArrayObject *out, PyArrayObject *mp)
+
+Cast to an already created array. Arrays don't have to be "broadcastable"
+Only requirement is they have the same number of elements.
+
+::
+
+ int
+ PyArray_CanCastSafely(int fromtype, int totype)
+
+Check the type coercion rules.
+
+::
+
+ npy_bool
+ PyArray_CanCastTo(PyArray_Descr *from, PyArray_Descr *to)
+
+leaves reference count alone --- cannot be NULL
+
+PyArray_CanCastTypeTo is equivalent to this, but adds a 'casting'
+parameter.
+
+::
+
+ int
+ PyArray_ObjectType(PyObject *op, int minimum_type)
+
+Return the typecode of the array a Python object would be converted to
+
+Returns the type number the result should have, or NPY_NOTYPE on error.
+
+::
+
+ PyArray_Descr *
+ PyArray_DescrFromObject(PyObject *op, PyArray_Descr *mintype)
+
+new reference -- accepts NULL for mintype
+
+::
+
+ PyArrayObject **
+ PyArray_ConvertToCommonType(PyObject *op, int *retn)
+
+
+::
+
+ PyArray_Descr *
+ PyArray_DescrFromScalar(PyObject *sc)
+
+Return descr object from array scalar.
+
+New reference
+
+::
+
+ PyArray_Descr *
+ PyArray_DescrFromTypeObject(PyObject *type)
+
+
+::
+
+ npy_intp
+ PyArray_Size(PyObject *op)
+
+Compute the size of an array (in number of items)
+
+::
+
+ PyObject *
+ PyArray_Scalar(void *data, PyArray_Descr *descr, PyObject *base)
+
+Get scalar-equivalent to a region of memory described by a descriptor.
+
+::
+
+ PyObject *
+ PyArray_FromScalar(PyObject *scalar, PyArray_Descr *outcode)
+
+Get 0-dim array from scalar
+
+0-dim array from array-scalar object
+always contains a copy of the data
+unless outcode is NULL, it is of void type and the referrer does
+not own it either.
+
+steals reference to outcode
+
+::
+
+ void
+ PyArray_ScalarAsCtype(PyObject *scalar, void *ctypeptr)
+
+Convert to c-type
+
+no error checking is performed -- ctypeptr must be same type as scalar
+in case of flexible type, the data is not copied
+into ctypeptr which is expected to be a pointer to pointer
+
+::
+
+ int
+ PyArray_CastScalarToCtype(PyObject *scalar, void
+ *ctypeptr, PyArray_Descr *outcode)
+
+Cast Scalar to c-type
+
+The output buffer must be large enough to receive the value,
+even for flexible types. This is different from ScalarAsCtype,
+where only a reference is returned for flexible types.
+
+This may not work right on narrow builds for NumPy unicode scalars.
+
+::
+
+ int
+ PyArray_CastScalarDirect(PyObject *scalar, PyArray_Descr
+ *indescr, void *ctypeptr, int outtype)
+
+Cast Scalar to c-type
+
+::
+
+ PyObject *
+ PyArray_ScalarFromObject(PyObject *object)
+
+Get an Array Scalar From a Python Object
+
+Returns NULL if unsuccessful, but an error is only set if another error occurred.
+Currently only Numeric-like objects are supported.
+
+::
+
+ PyArray_VectorUnaryFunc *
+ PyArray_GetCastFunc(PyArray_Descr *descr, int type_num)
+
+Get a cast function to cast from the input descriptor to the
+output type_number (must be a registered data-type).
+Returns NULL if unsuccessful.
+
+::
+
+ PyObject *
+ PyArray_FromDims(int nd, int *d, int type)
+
+Construct an empty array from dimensions and typenum
+
+::
+
+ PyObject *
+ PyArray_FromDimsAndDataAndDescr(int nd, int *d, PyArray_Descr
+ *descr, char *data)
+
+Like FromDimsAndData but uses the Descr structure instead of typecode
+as input.
+
+::
+
+ PyObject *
+ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int
+ min_depth, int max_depth, int flags, PyObject
+ *context)
+
+Does not check for NPY_ARRAY_ENSURECOPY and NPY_ARRAY_NOTSWAPPED in flags
+Steals a reference to newtype --- which can be NULL
+
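+A minimal sketch of typical use, assuming ``op`` is some Python object to
+be converted to a contiguous double array (the flag and dtype choices are
+illustrative)::
+
+ PyObject *arr = PyArray_FromAny(op, PyArray_DescrFromType(NPY_DOUBLE),
+                                 0, 0, NPY_ARRAY_CARRAY, NULL);
+ if (arr == NULL) {
+     return NULL;  /* conversion failed; the error is already set */
+ }
+ /* ... use arr ... */
+ Py_DECREF(arr);
+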
+::
+
+ PyObject *
+ PyArray_EnsureArray(PyObject *op)
+
+This is a quick wrapper around
+PyArray_FromAny(op, NULL, 0, 0, NPY_ARRAY_ENSUREARRAY, NULL)
+that special cases Arrays and PyArray_Scalars up front
+It *steals a reference* to the object
+It also guarantees that the result is PyArray_Type
+Because it decrefs op if any conversion needs to take place
+so it can be used like PyArray_EnsureArray(some_function(...))
+
+::
+
+ PyObject *
+ PyArray_EnsureAnyArray(PyObject *op)
+
+
+::
+
+ PyObject *
+ PyArray_FromFile(FILE *fp, PyArray_Descr *dtype, npy_intp num, char
+ *sep)
+
+
+Given a ``FILE *`` pointer ``fp``, and a ``PyArray_Descr``, return an
+array corresponding to the data encoded in that file.
+
+If the dtype is NULL, the default array type is used (double).
+If non-null, the reference is stolen and if dtype->subarray is true dtype
+will be decrefed even on success.
+
+The number of elements to read is given as ``num``; if it is < 0, then
+as many as possible are read.
+
+If ``sep`` is NULL or empty, then binary data is assumed, else
+text data, with ``sep`` as the separator between elements. Whitespace in
+the separator matches any length of whitespace in the text, and a match
+for whitespace around the separator is added.
+
+For memory-mapped files, use the buffer interface. No more data than
+necessary is read by this routine.
+
+::
+
+ PyObject *
+ PyArray_FromString(char *data, npy_intp slen, PyArray_Descr
+ *dtype, npy_intp num, char *sep)
+
+
+Given a pointer to a string ``data``, a string length ``slen``, and
+a ``PyArray_Descr``, return an array corresponding to the data
+encoded in that string.
+
+If the dtype is NULL, the default array type is used (double).
+If non-null, the reference is stolen.
+
+If ``slen`` is < 0, then the end of string is used for text data.
+It is an error for ``slen`` to be < 0 for binary data (since embedded NULLs
+would be the norm).
+
+The number of elements to read is given as ``num``; if it is < 0, then
+as many as possible are read.
+
+If ``sep`` is NULL or empty, then binary data is assumed, else
+text data, with ``sep`` as the separator between elements. Whitespace in
+the separator matches any length of whitespace in the text, and a match
+for whitespace around the separator is added.
+
+::
+
+ PyObject *
+ PyArray_FromBuffer(PyObject *buf, PyArray_Descr *type, npy_intp
+ count, npy_intp offset)
+
+
+::
+
+ PyObject *
+ PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count)
+
+
+steals a reference to dtype (which cannot be NULL)
+
+::
+
+ PyObject *
+ PyArray_Return(PyArrayObject *mp)
+
+
+Return either an array or the appropriate Python object if the array
+is 0d and matches a Python type.
+steals reference to mp
+
+::
+
+ PyObject *
+ PyArray_GetField(PyArrayObject *self, PyArray_Descr *typed, int
+ offset)
+
+Get a subset of bytes from each element of the array
+steals reference to typed, must not be NULL
+
+::
+
+ int
+ PyArray_SetField(PyArrayObject *self, PyArray_Descr *dtype, int
+ offset, PyObject *val)
+
+Set a subset of bytes from each element of the array
+steals reference to dtype, must not be NULL
+
+::
+
+ PyObject *
+ PyArray_Byteswap(PyArrayObject *self, npy_bool inplace)
+
+
+::
+
+ PyObject *
+ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int
+ refcheck, NPY_ORDER order)
+
+Resize (reallocate data). Only works if nothing else is referencing this
+array and it is contiguous. If refcheck is 0, then the reference count is
+not checked and assumed to be 1. You still must own this data and have no
+weak-references and no base object.
+
+::
+
+ int
+ PyArray_MoveInto(PyArrayObject *dst, PyArrayObject *src)
+
+Move the memory of one array into another, allowing for overlapping data.
+
+Returns 0 on success, negative on failure.
+
+::
+
+ int
+ PyArray_CopyInto(PyArrayObject *dst, PyArrayObject *src)
+
+Copy an Array into another array.
+Broadcast to the destination shape if necessary.
+
+Returns 0 on success, -1 on failure.
+
+::
+
+ int
+ PyArray_CopyAnyInto(PyArrayObject *dst, PyArrayObject *src)
+
+Copy an Array into another array -- memory must not overlap
+Does not require src and dest to have "broadcastable" shapes
+(only the same number of elements).
+
+TODO: For NumPy 2.0, this could accept an order parameter which
+only allows NPY_CORDER and NPY_FORDER. Could also rename
+this to CopyAsFlat to make the name more intuitive.
+
+Returns 0 on success, -1 on error.
+
+::
+
+ int
+ PyArray_CopyObject(PyArrayObject *dest, PyObject *src_object)
+
+
+::
+
+ PyObject *
+ PyArray_NewCopy(PyArrayObject *obj, NPY_ORDER order)
+
+Copy an array.
+
+::
+
+ PyObject *
+ PyArray_ToList(PyArrayObject *self)
+
+To List
+
+::
+
+ PyObject *
+ PyArray_ToString(PyArrayObject *self, NPY_ORDER order)
+
+
+::
+
+ int
+ PyArray_ToFile(PyArrayObject *self, FILE *fp, char *sep, char *format)
+
+To File
+
+::
+
+ int
+ PyArray_Dump(PyObject *self, PyObject *file, int protocol)
+
+
+::
+
+ PyObject *
+ PyArray_Dumps(PyObject *self, int protocol)
+
+
+::
+
+ int
+ PyArray_ValidType(int type)
+
+Is the typenum valid?
+
+::
+
+ void
+ PyArray_UpdateFlags(PyArrayObject *ret, int flagmask)
+
+Update Several Flags at once.
+
+::
+
+ PyObject *
+ PyArray_New(PyTypeObject *subtype, int nd, npy_intp *dims, int
+ type_num, npy_intp *strides, void *data, int itemsize, int
+ flags, PyObject *obj)
+
+Generic new array creation routine.
+
+::
+
+ PyObject *
+ PyArray_NewFromDescr(PyTypeObject *subtype, PyArray_Descr *descr, int
+ nd, npy_intp *dims, npy_intp *strides, void
+ *data, int flags, PyObject *obj)
+
+Generic new array creation routine.
+
+steals a reference to descr. On failure or when dtype->subarray is
+true, dtype will be decrefed.
+
+::
+
+ PyArray_Descr *
+ PyArray_DescrNew(PyArray_Descr *base)
+
+base cannot be NULL
+
+::
+
+ PyArray_Descr *
+ PyArray_DescrNewFromType(int type_num)
+
+
+::
+
+ double
+ PyArray_GetPriority(PyObject *obj, double default_)
+
+Get Priority from object
+
+::
+
+ PyObject *
+ PyArray_IterNew(PyObject *obj)
+
+Get Iterator.
+
+::
+
+ PyObject *
+ PyArray_MultiIterNew(int n, ... )
+
+Get MultiIterator,
+
+::
+
+ int
+ PyArray_PyIntAsInt(PyObject *o)
+
+
+::
+
+ npy_intp
+ PyArray_PyIntAsIntp(PyObject *o)
+
+
+::
+
+ int
+ PyArray_Broadcast(PyArrayMultiIterObject *mit)
+
+
+::
+
+ void
+ PyArray_FillObjectArray(PyArrayObject *arr, PyObject *obj)
+
+Assumes contiguous
+
+::
+
+ int
+ PyArray_FillWithScalar(PyArrayObject *arr, PyObject *obj)
+
+
+::
+
+ npy_bool
+ PyArray_CheckStrides(int elsize, int nd, npy_intp numbytes, npy_intp
+ offset, npy_intp *dims, npy_intp *newstrides)
+
+
+::
+
+ PyArray_Descr *
+ PyArray_DescrNewByteorder(PyArray_Descr *self, char newendian)
+
+
+returns a copy of the PyArray_Descr structure with the byteorder
+altered:
+no arguments: The byteorder is swapped (in all subfields as well)
+single argument: The byteorder is forced to the given state
+(in all subfields as well)
+
+Valid states: ('big', '>'), ('little', '<'), ('native', '=')
+
+If a descr structure with | is encountered, its own
+byte-order is not changed but any fields are:
+
+
+Deep byteorder change of a data-type descriptor
+Leaves reference count of self unchanged --- does not DECREF self ***
+
+::
+
+ PyObject *
+ PyArray_IterAllButAxis(PyObject *obj, int *inaxis)
+
+Get Iterator that iterates over all but one axis (don't use this with
+PyArray_ITER_GOTO1D). The axis will be over-written if negative
+with the axis having the smallest stride.
+
+::
+
+ PyObject *
+ PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int
+ min_depth, int max_depth, int requires, PyObject
+ *context)
+
+steals a reference to descr -- accepts NULL
+
+::
+
+ PyObject *
+ PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int
+ flags)
+
+steals reference to newtype --- acc. NULL
+
+::
+
+ PyObject *
+ PyArray_FromInterface(PyObject *origin)
+
+
+::
+
+ PyObject *
+ PyArray_FromStructInterface(PyObject *input)
+
+
+::
+
+ PyObject *
+ PyArray_FromArrayAttr(PyObject *op, PyArray_Descr *typecode, PyObject
+ *context)
+
+
+::
+
+ NPY_SCALARKIND
+ PyArray_ScalarKind(int typenum, PyArrayObject **arr)
+
+ScalarKind
+
+Returns the scalar kind of a type number, with an
+optional tweak based on the scalar value itself.
+If no scalar is provided, it returns INTPOS_SCALAR
+for both signed and unsigned integers, otherwise
+it checks the sign of any signed integer to choose
+INTNEG_SCALAR when appropriate.
+
+::
+
+ int
+ PyArray_CanCoerceScalar(int thistype, int neededtype, NPY_SCALARKIND
+ scalar)
+
+
+Determines whether the data type 'thistype', with
+scalar kind 'scalar', can be coerced into 'neededtype'.
+
+::
+
+ PyObject *
+ PyArray_NewFlagsObject(PyObject *obj)
+
+
+Get New ArrayFlagsObject
+
+::
+
+ npy_bool
+ PyArray_CanCastScalar(PyTypeObject *from, PyTypeObject *to)
+
+See if array scalars can be cast.
+
+TODO: For NumPy 2.0, add a NPY_CASTING parameter.
+
+::
+
+ int
+ PyArray_CompareUCS4(npy_ucs4 *s1, npy_ucs4 *s2, size_t len)
+
+
+::
+
+ int
+ PyArray_RemoveSmallest(PyArrayMultiIterObject *multi)
+
+Adjusts previously broadcasted iterators so that the axis with
+the smallest sum of iterator strides is not iterated over.
+Returns dimension which is smallest in the range [0,multi->nd).
+A -1 is returned if multi->nd == 0.
+
+don't use with PyArray_ITER_GOTO1D because factors are not adjusted
+
+::
+
+ int
+ PyArray_ElementStrides(PyObject *obj)
+
+
+::
+
+ void
+ PyArray_Item_INCREF(char *data, PyArray_Descr *descr)
+
+XINCREF all objects in a single array item. This is complicated for
+structured datatypes where the position of objects needs to be extracted.
+The function is executed recursively for each nested field or subarray dtype,
+such as `np.dtype([("field1", "O"), ("field2", "f,O", (3,2))])`
+
+::
+
+ void
+ PyArray_Item_XDECREF(char *data, PyArray_Descr *descr)
+
+
+XDECREF all objects in a single array item. This is complicated for
+structured datatypes where the position of objects needs to be extracted.
+The function is executed recursively for each nested field or subarray dtype,
+such as `np.dtype([("field1", "O"), ("field2", "f,O", (3,2))])`
+
+::
+
+ PyObject *
+ PyArray_FieldNames(PyObject *fields)
+
+Return the tuple of ordered field names from a dictionary.
+
+::
+
+ PyObject *
+ PyArray_Transpose(PyArrayObject *ap, PyArray_Dims *permute)
+
+Return Transpose.
+
+::
+
+ PyObject *
+ PyArray_TakeFrom(PyArrayObject *self0, PyObject *indices0, int
+ axis, PyArrayObject *out, NPY_CLIPMODE clipmode)
+
+Take
+
+::
+
+ PyObject *
+ PyArray_PutTo(PyArrayObject *self, PyObject*values0, PyObject
+ *indices0, NPY_CLIPMODE clipmode)
+
+Put values into an array
+
+::
+
+ PyObject *
+ PyArray_PutMask(PyArrayObject *self, PyObject*values0, PyObject*mask0)
+
+Put values into an array according to a mask.
+
+::
+
+ PyObject *
+ PyArray_Repeat(PyArrayObject *aop, PyObject *op, int axis)
+
+Repeat the array.
+
+::
+
+ PyObject *
+ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject
+ *out, NPY_CLIPMODE clipmode)
+
+
+::
+
+ int
+ PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND which)
+
+Sort an array in-place
+
+::
+
+ PyObject *
+ PyArray_ArgSort(PyArrayObject *op, int axis, NPY_SORTKIND which)
+
+ArgSort an array
+
+::
+
+ PyObject *
+ PyArray_SearchSorted(PyArrayObject *op1, PyObject *op2, NPY_SEARCHSIDE
+ side, PyObject *perm)
+
+
+Search the sorted array op1 for the location of the items in op2. The
+result is an array of indexes, one for each element in op2, such that if
+the item were to be inserted in op1 just before that index the array
+would still be in sorted order.
+
+Parameters
+----------
+op1 : PyArrayObject *
+Array to be searched, must be 1-D.
+op2 : PyObject *
+Array of items whose insertion indexes in op1 are wanted
+side : {NPY_SEARCHLEFT, NPY_SEARCHRIGHT}
+If NPY_SEARCHLEFT, return first valid insertion indexes
+If NPY_SEARCHRIGHT, return last valid insertion indexes
+perm : PyObject *
+Permutation array that sorts op1 (optional)
+
+Returns
+-------
+ret : PyObject *
+New reference to npy_intp array containing indexes where items in op2
+could be validly inserted into op1. NULL on error.
+
+Notes
+-----
+Binary search is used to find the indexes.
+
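+A minimal call sketch, assuming ``sorted_arr`` and ``values`` are existing
+array objects (illustrative names)::
+
+ PyObject *indexes = PyArray_SearchSorted(sorted_arr, values,
+                                          NPY_SEARCHLEFT, NULL);
+ if (indexes == NULL) {
+     return NULL;  /* error already set */
+ }
+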
+::
+
+ PyObject *
+ PyArray_ArgMax(PyArrayObject *op, int axis, PyArrayObject *out)
+
+ArgMax
+
+::
+
+ PyObject *
+ PyArray_ArgMin(PyArrayObject *op, int axis, PyArrayObject *out)
+
+ArgMin
+
+::
+
+ PyObject *
+ PyArray_Reshape(PyArrayObject *self, PyObject *shape)
+
+Reshape
+
+::
+
+ PyObject *
+ PyArray_Newshape(PyArrayObject *self, PyArray_Dims *newdims, NPY_ORDER
+ order)
+
+New shape for an array
+
+::
+
+ PyObject *
+ PyArray_Squeeze(PyArrayObject *self)
+
+
+return a new view of the array object with all of its unit-length
+dimensions squeezed out if needed, otherwise
+return the same array.
+
+::
+
+ PyObject *
+ PyArray_View(PyArrayObject *self, PyArray_Descr *type, PyTypeObject
+ *pytype)
+
+View
+steals a reference to type -- accepts NULL
+
+::
+
+ PyObject *
+ PyArray_SwapAxes(PyArrayObject *ap, int a1, int a2)
+
+SwapAxes
+
+::
+
+ PyObject *
+ PyArray_Max(PyArrayObject *ap, int axis, PyArrayObject *out)
+
+Max
+
+::
+
+ PyObject *
+ PyArray_Min(PyArrayObject *ap, int axis, PyArrayObject *out)
+
+Min
+
+::
+
+ PyObject *
+ PyArray_Ptp(PyArrayObject *ap, int axis, PyArrayObject *out)
+
+Ptp
+
+::
+
+ PyObject *
+ PyArray_Mean(PyArrayObject *self, int axis, int rtype, PyArrayObject
+ *out)
+
+Mean
+
+::
+
+ PyObject *
+ PyArray_Trace(PyArrayObject *self, int offset, int axis1, int
+ axis2, int rtype, PyArrayObject *out)
+
+Trace
+
+::
+
+ PyObject *
+ PyArray_Diagonal(PyArrayObject *self, int offset, int axis1, int
+ axis2)
+
+Diagonal
+
+In NumPy versions prior to 1.7, this function always returned a copy of
+the diagonal array. In 1.7, the code has been updated to compute a view
+onto 'self', but it still copies this array before returning, as well as
+setting the internal WARN_ON_WRITE flag. In a future version, it will
+simply return a view onto self.
+
+::
+
+ PyObject *
+ PyArray_Clip(PyArrayObject *self, PyObject *min, PyObject
+ *max, PyArrayObject *out)
+
+Clip
+
+::
+
+ PyObject *
+ PyArray_Conjugate(PyArrayObject *self, PyArrayObject *out)
+
+Conjugate
+
+::
+
+ PyObject *
+ PyArray_Nonzero(PyArrayObject *self)
+
+Nonzero
+
+TODO: In NumPy 2.0, should make the iteration order a parameter.
+
+::
+
+ PyObject *
+ PyArray_Std(PyArrayObject *self, int axis, int rtype, PyArrayObject
+ *out, int variance)
+
+Std
+
+Set variance to 1 to bypass the square-root calculation and return the variance.
+
+::
+
+ PyObject *
+ PyArray_Sum(PyArrayObject *self, int axis, int rtype, PyArrayObject
+ *out)
+
+Sum
+
+::
+
+ PyObject *
+ PyArray_CumSum(PyArrayObject *self, int axis, int rtype, PyArrayObject
+ *out)
+
+CumSum
+
+::
+
+ PyObject *
+ PyArray_Prod(PyArrayObject *self, int axis, int rtype, PyArrayObject
+ *out)
+
+Prod
+
+::
+
+ PyObject *
+ PyArray_CumProd(PyArrayObject *self, int axis, int
+ rtype, PyArrayObject *out)
+
+CumProd
+
+::
+
+ PyObject *
+ PyArray_All(PyArrayObject *self, int axis, PyArrayObject *out)
+
+All
+
+::
+
+ PyObject *
+ PyArray_Any(PyArrayObject *self, int axis, PyArrayObject *out)
+
+Any
+
+::
+
+ PyObject *
+ PyArray_Compress(PyArrayObject *self, PyObject *condition, int
+ axis, PyArrayObject *out)
+
+Compress
+
+::
+
+ PyObject *
+ PyArray_Flatten(PyArrayObject *a, NPY_ORDER order)
+
+Flatten
+
+::
+
+ PyObject *
+ PyArray_Ravel(PyArrayObject *arr, NPY_ORDER order)
+
+Ravel
+Returns a contiguous array
+
+::
+
+ npy_intp
+ PyArray_MultiplyList(npy_intp *l1, int n)
+
+Multiply a List
+
+::
+
+ int
+ PyArray_MultiplyIntList(int *l1, int n)
+
+Multiply a List of ints
+
+::
+
+ void *
+ PyArray_GetPtr(PyArrayObject *obj, npy_intp*ind)
+
+Produce a pointer into array
+
+::
+
+ int
+ PyArray_CompareLists(npy_intp *l1, npy_intp *l2, int n)
+
+Compare Lists
+
+::
+
+ int
+ PyArray_AsCArray(PyObject **op, void *ptr, npy_intp *dims, int
+ nd, PyArray_Descr*typedescr)
+
+Simulate a C-array
+steals a reference to typedescr -- can be NULL
+
+::
+
+ int
+ PyArray_As1D(PyObject **op, char **ptr, int *d1, int typecode)
+
+Convert to a 1D C-array
+
+::
+
+ int
+ PyArray_As2D(PyObject **op, char ***ptr, int *d1, int *d2, int
+ typecode)
+
+Convert to a 2D C-array
+
+::
+
+ int
+ PyArray_Free(PyObject *op, void *ptr)
+
+Free pointers created if As2D is called
+
+::
+
+ int
+ PyArray_Converter(PyObject *object, PyObject **address)
+
+
+Useful to pass as converter function for O& processing in PyArg_ParseTuple.
+
+This conversion function can be used with the "O&" argument for
+PyArg_ParseTuple. It will immediately return an object of array type
+or will convert to a NPY_ARRAY_CARRAY any other object.
+
+If you use PyArray_Converter, you must DECREF the array when finished
+as you get a new reference to it.
+
+::
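+A typical "O&" sketch, assuming a standard METH_VARARGS function (the
+variable names are illustrative)::
+
+ PyObject *in_obj = NULL;
+ if (!PyArg_ParseTuple(args, "O&", PyArray_Converter, &in_obj)) {
+     return NULL;
+ }
+ /* ... work with in_obj, which is guaranteed to be an array ... */
+ Py_DECREF(in_obj);  /* the converter handed back a new reference */
+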
+
+ int
+ PyArray_IntpFromSequence(PyObject *seq, npy_intp *vals, int maxvals)
+
+PyArray_IntpFromSequence
+Returns the number of integers converted or -1 if an error occurred.
+vals must be large enough to hold maxvals
+
+::
+
+ PyObject *
+ PyArray_Concatenate(PyObject *op, int axis)
+
+Concatenate
+
+Concatenate an arbitrary Python sequence into an array.
+op is a python object supporting the sequence interface.
+Its elements will be concatenated together to form a single
+multidimensional array. If axis is NPY_MAXDIMS or bigger, then
+each sequence object will be flattened before concatenation
+
+::
+
+ PyObject *
+ PyArray_InnerProduct(PyObject *op1, PyObject *op2)
+
+Numeric.innerproduct(a,v)
+
+::
+
+ PyObject *
+ PyArray_MatrixProduct(PyObject *op1, PyObject *op2)
+
+Numeric.matrixproduct(a,v)
+just like inner product but does the swapaxes stuff on the fly
+
+::
+
+ PyObject *
+ PyArray_CopyAndTranspose(PyObject *op)
+
+Copy and Transpose
+
+Could deprecate this function, as there isn't a speed benefit over
+calling Transpose and then Copy.
+
+::
+
+ PyObject *
+ PyArray_Correlate(PyObject *op1, PyObject *op2, int mode)
+
+Numeric.correlate(a1,a2,mode)
+
+::
+
+ int
+ PyArray_TypestrConvert(int itemsize, int gentype)
+
+Typestr converter
+
+::
+
+ int
+ PyArray_DescrConverter(PyObject *obj, PyArray_Descr **at)
+
+Get typenum from an object -- None goes to NPY_DEFAULT_TYPE
+This function takes a Python object representing a type and converts it
+to the correct PyArray_Descr * structure to describe the type.
+
+Many objects can be used to represent a data-type which in NumPy is
+quite a flexible concept.
+
+This is the central code that converts Python objects to
+Type-descriptor objects that are used throughout numpy.
+
+Returns a new reference in *at, but the returned object should not be
+modified as it may be one of the canonical immutable objects or
+a reference to the input obj.
+
+::
+
+ int
+ PyArray_DescrConverter2(PyObject *obj, PyArray_Descr **at)
+
+Get typenum from an object -- None goes to NULL
+
+::
+
+ int
+ PyArray_IntpConverter(PyObject *obj, PyArray_Dims *seq)
+
+Get intp chunk from sequence
+
+This function takes a Python sequence object and allocates and
+fills in an intp array with the converted values.
+
+Remember to free the pointer seq.ptr when done, using
+PyDimMem_FREE(seq.ptr).
+
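+A minimal sketch, assuming ``shape_obj`` is the Python sequence to convert
+(illustrative name)::
+
+ PyArray_Dims shape = {NULL, 0};
+ if (!PyArray_IntpConverter(shape_obj, &shape)) {
+     return NULL;
+ }
+ /* ... use shape.ptr (npy_intp *) and shape.len ... */
+ PyDimMem_FREE(shape.ptr);
+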
+::
+
+ int
+ PyArray_BufferConverter(PyObject *obj, PyArray_Chunk *buf)
+
+Get buffer chunk from object
+
+this function takes a Python object which exposes the (single-segment)
+buffer interface and returns a pointer to the data segment
+
+You should increment the reference count of buf->base by one
+if you will hang on to a reference
+
+You only get a borrowed reference to the object. Do not free the
+memory...
+
+::
+
+ int
+ PyArray_AxisConverter(PyObject *obj, int *axis)
+
+Get axis from an object (possibly None) -- a converter function,
+
+See also PyArray_ConvertMultiAxis, which also handles a tuple of axes.
+
+::
+
+ int
+ PyArray_BoolConverter(PyObject *object, npy_bool *val)
+
+Convert an object to true / false
+
+::
+
+ int
+ PyArray_ByteorderConverter(PyObject *obj, char *endian)
+
+Convert object to endian
+
+::
+
+ int
+ PyArray_OrderConverter(PyObject *object, NPY_ORDER *val)
+
+Convert an object to FORTRAN / C / ANY / KEEP
+
+::
+
+ unsigned char
+ PyArray_EquivTypes(PyArray_Descr *type1, PyArray_Descr *type2)
+
+
+This function returns true if the two typecodes are
+equivalent (same basic kind and same itemsize).
+
+::
+
+ PyObject *
+ PyArray_Zeros(int nd, npy_intp *dims, PyArray_Descr *type, int
+ is_f_order)
+
+Zeros
+
+steals a reference to type. On failure or when dtype->subarray is
+true, dtype will be decrefed.
+accepts NULL type
+
+::
+
+ PyObject *
+ PyArray_Empty(int nd, npy_intp *dims, PyArray_Descr *type, int
+ is_f_order)
+
+Empty
+
+accepts NULL type
+steals reference to type
+
+::
+
+ PyObject *
+ PyArray_Where(PyObject *condition, PyObject *x, PyObject *y)
+
+Where
+
+::
+
+ PyObject *
+ PyArray_Arange(double start, double stop, double step, int type_num)
+
+Arange,
+
+::
+
+ PyObject *
+ PyArray_ArangeObj(PyObject *start, PyObject *stop, PyObject
+ *step, PyArray_Descr *dtype)
+
+
+ArangeObj,
+
+this doesn't change the references
+
+::
+
+ int
+ PyArray_SortkindConverter(PyObject *obj, NPY_SORTKIND *sortkind)
+
+Convert object to sort kind
+
+::
+
+ PyObject *
+ PyArray_LexSort(PyObject *sort_keys, int axis)
+
+LexSort an array, providing indices that will sort a collection of arrays
+lexicographically. The first key is sorted on first, followed by the second key
+-- requires that a "merge" (stable) argsort is available for each sort_key
+
+Returns an index array that shows the indexes for the lexicographic sort along
+the given axis.
+
+::
+
+ PyObject *
+ PyArray_Round(PyArrayObject *a, int decimals, PyArrayObject *out)
+
+Round
+
+::
+
+ unsigned char
+ PyArray_EquivTypenums(int typenum1, int typenum2)
+
+
+::
+
+ int
+ PyArray_RegisterDataType(PyArray_Descr *descr)
+
+Register Data type
+Does not change the reference count of descr
+
+::
+
+ int
+ PyArray_RegisterCastFunc(PyArray_Descr *descr, int
+ totype, PyArray_VectorUnaryFunc *castfunc)
+
+Register Casting Function
+Replaces any function currently stored.
+
+::
+
+ int
+ PyArray_RegisterCanCast(PyArray_Descr *descr, int
+ totype, NPY_SCALARKIND scalar)
+
+Register a type number indicating that a descriptor can be cast
+to it safely
+
+::
+
+ void
+ PyArray_InitArrFuncs(PyArray_ArrFuncs *f)
+
+Initialize arrfuncs to NULL
+
+::
+
+ PyObject *
+ PyArray_IntTupleFromIntp(int len, npy_intp *vals)
+
+PyArray_IntTupleFromIntp
+
+::
+
+ int
+ PyArray_TypeNumFromName(char *str)
+
+
+::
+
+ int
+ PyArray_ClipmodeConverter(PyObject *object, NPY_CLIPMODE *val)
+
+Convert an object to NPY_RAISE / NPY_CLIP / NPY_WRAP
+
+::
+
+ int
+ PyArray_OutputConverter(PyObject *object, PyArrayObject **address)
+
+Useful to pass as converter function for O& processing in
+PyArg_ParseTuple for output arrays
+
+::
+
+ PyObject *
+ PyArray_BroadcastToShape(PyObject *obj, npy_intp *dims, int nd)
+
+Get Iterator broadcast to a particular shape
+
+::
+
+ void
+ _PyArray_SigintHandler(int signum)
+
+
+::
+
+ void*
+ _PyArray_GetSigintBuf(void )
+
+
+::
+
+ int
+ PyArray_DescrAlignConverter(PyObject *obj, PyArray_Descr **at)
+
+
+Get type-descriptor from an object forcing alignment if possible
+None goes to DEFAULT type.
+
+any object with the .fields attribute and/or .itemsize attribute (if the
+.fields attribute does not give the total size -- i.e. a partial record
+naming). If itemsize is given it must be >= size computed from fields
+
+The .fields attribute must return a convertible dictionary if present.
+Result inherits from NPY_VOID.
+
+::
+
+ int
+ PyArray_DescrAlignConverter2(PyObject *obj, PyArray_Descr **at)
+
+
+Get type-descriptor from an object forcing alignment if possible
+None goes to NULL.
+
+::
+
+ int
+ PyArray_SearchsideConverter(PyObject *obj, void *addr)
+
+Convert object to searchsorted side
+
+::
+
+ PyObject *
+ PyArray_CheckAxis(PyArrayObject *arr, int *axis, int flags)
+
+PyArray_CheckAxis
+
+check that axis is valid
+convert 0-d arrays to 1-d arrays
+
+::
+
+ npy_intp
+ PyArray_OverflowMultiplyList(npy_intp *l1, int n)
+
+Multiply a List of Non-negative numbers with over-flow detection.
+
+::
+
+ int
+ PyArray_CompareString(char *s1, char *s2, size_t len)
+
+
+::
+
+ PyObject *
+ PyArray_MultiIterFromObjects(PyObject **mps, int n, int nadd, ... )
+
+Get MultiIterator from array of Python objects and any additional
+
+PyObject **mps -- array of PyObjects
+int n - number of PyObjects in the array
+int nadd - number of additional arrays to include in the iterator.
+
+Returns a multi-iterator object.
+
+::
+
+ int
+ PyArray_GetEndianness(void )
+
+
+::
+
+ unsigned int
+ PyArray_GetNDArrayCFeatureVersion(void )
+
+Returns the built-in (at compilation time) C API version
+
+::
+
+ PyObject *
+ PyArray_Correlate2(PyObject *op1, PyObject *op2, int mode)
+
+correlate(a1,a2,mode)
+
+This function computes the usual correlation (correlate(a1, a2) !=
+correlate(a2, a1)), and conjugates the second argument for complex inputs
+
+::
+
+ PyObject*
+ PyArray_NeighborhoodIterNew(PyArrayIterObject *x, npy_intp
+ *bounds, int mode, PyArrayObject*fill)
+
+A Neighborhood Iterator object.
+
+::
+
+ void
+ PyArray_SetDatetimeParseFunction(PyObject *op)
+
+This function is scheduled to be removed
+
+TO BE REMOVED - NOT USED INTERNALLY.
+
+::
+
+ void
+ PyArray_DatetimeToDatetimeStruct(npy_datetime val, NPY_DATETIMEUNIT
+ fr, npy_datetimestruct *result)
+
+Fill the datetime struct from the value and resolution unit.
+
+TO BE REMOVED - NOT USED INTERNALLY.
+
+::
+
+ void
+ PyArray_TimedeltaToTimedeltaStruct(npy_timedelta val, NPY_DATETIMEUNIT
+ fr, npy_timedeltastruct *result)
+
+Fill the timedelta struct from the timedelta value and resolution unit.
+
+TO BE REMOVED - NOT USED INTERNALLY.
+
+::
+
+ npy_datetime
+ PyArray_DatetimeStructToDatetime(NPY_DATETIMEUNIT
+ fr, npy_datetimestruct *d)
+
+Create a datetime value from a filled datetime struct and resolution unit.
+
+TO BE REMOVED - NOT USED INTERNALLY.
+
+::
+
+ npy_datetime
+ PyArray_TimedeltaStructToTimedelta(NPY_DATETIMEUNIT
+ fr, npy_timedeltastruct *d)
+
+Create a timedelta value from a filled timedelta struct and resolution unit.
+
+TO BE REMOVED - NOT USED INTERNALLY.
+
+::
+
+ NpyIter *
+ NpyIter_New(PyArrayObject *op, npy_uint32 flags, NPY_ORDER
+ order, NPY_CASTING casting, PyArray_Descr*dtype)
+
+Allocate a new iterator for one array object.
+
+::
+
+ NpyIter *
+ NpyIter_MultiNew(int nop, PyArrayObject **op_in, npy_uint32
+ flags, NPY_ORDER order, NPY_CASTING
+ casting, npy_uint32 *op_flags, PyArray_Descr
+ **op_request_dtypes)
+
+Allocate a new iterator for more than one array object, using
+standard NumPy broadcasting rules and the default buffer size.
+
+::
+
+ NpyIter *
+ NpyIter_AdvancedNew(int nop, PyArrayObject **op_in, npy_uint32
+ flags, NPY_ORDER order, NPY_CASTING
+ casting, npy_uint32 *op_flags, PyArray_Descr
+ **op_request_dtypes, int oa_ndim, int
+ **op_axes, npy_intp *itershape, npy_intp
+ buffersize)
+
+Allocate a new iterator for multiple array objects, and advanced
+options for controlling the broadcasting, shape, and buffer size.
+
+::
+
+ NpyIter *
+ NpyIter_Copy(NpyIter *iter)
+
+Makes a copy of the iterator
+
+::
+
+ int
+ NpyIter_Deallocate(NpyIter *iter)
+
+Deallocate an iterator
+
+::
+
+ npy_bool
+ NpyIter_HasDelayedBufAlloc(NpyIter *iter)
+
+Whether the buffer allocation is being delayed
+
+::
+
+ npy_bool
+ NpyIter_HasExternalLoop(NpyIter *iter)
+
+Whether the iterator handles the inner loop
+
+::
+
+ int
+ NpyIter_EnableExternalLoop(NpyIter *iter)
+
+Removes the inner loop handling (so HasExternalLoop returns true)
+
+::
+
+ npy_intp *
+ NpyIter_GetInnerStrideArray(NpyIter *iter)
+
+Get the array of strides for the inner loop (when HasExternalLoop is true)
+
+This function may be safely called without holding the Python GIL.
+
+::
+
+ npy_intp *
+ NpyIter_GetInnerLoopSizePtr(NpyIter *iter)
+
+Get a pointer to the size of the inner loop (when HasExternalLoop is true)
+
+This function may be safely called without holding the Python GIL.
+
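+Together with NpyIter_GetIterNext and NpyIter_GetDataPtrArray (documented
+further below), these support the usual external-loop pattern. A condensed
+sketch, assuming ``arr`` is an existing PyArrayObject (error handling
+abbreviated)::
+
+ NpyIter *iter = NpyIter_New(arr,
+                             NPY_ITER_READONLY | NPY_ITER_EXTERNAL_LOOP,
+                             NPY_KEEPORDER, NPY_NO_CASTING, NULL);
+ NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(iter, NULL);
+ char **dataptr = NpyIter_GetDataPtrArray(iter);
+ npy_intp *strideptr = NpyIter_GetInnerStrideArray(iter);
+ npy_intp *innersizeptr = NpyIter_GetInnerLoopSizePtr(iter);
+ do {
+     char *data = *dataptr;
+     npy_intp stride = *strideptr, count = *innersizeptr;
+     while (count--) {
+         /* process the element at 'data' */
+         data += stride;
+     }
+ } while (iternext(iter));
+ NpyIter_Deallocate(iter);
+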
+::
+
+ int
+ NpyIter_Reset(NpyIter *iter, char **errmsg)
+
+Resets the iterator to its initial state
+
+If errmsg is non-NULL, it should point to a variable which will
+receive the error message, and no Python exception will be set.
+This is so that the function can be called from code not holding
+the GIL.
+
+::
+
+ int
+ NpyIter_ResetBasePointers(NpyIter *iter, char **baseptrs, char
+ **errmsg)
+
+Resets the iterator to its initial state, with new base data pointers.
+This function requires great caution.
+
+If errmsg is non-NULL, it should point to a variable which will
+receive the error message, and no Python exception will be set.
+This is so that the function can be called from code not holding
+the GIL.
+
+::
+
+ int
+ NpyIter_ResetToIterIndexRange(NpyIter *iter, npy_intp istart, npy_intp
+ iend, char **errmsg)
+
+Resets the iterator to a new iterator index range
+
+If errmsg is non-NULL, it should point to a variable which will
+receive the error message, and no Python exception will be set.
+This is so that the function can be called from code not holding
+the GIL.
+
+::
+
+ int
+ NpyIter_GetNDim(NpyIter *iter)
+
+Gets the number of dimensions being iterated
+
+::
+
+ int
+ NpyIter_GetNOp(NpyIter *iter)
+
+Gets the number of operands being iterated
+
+::
+
+ NpyIter_IterNextFunc *
+ NpyIter_GetIterNext(NpyIter *iter, char **errmsg)
+
+Compute the specialized iteration function for an iterator
+
+If errmsg is non-NULL, it should point to a variable which will
+receive the error message, and no Python exception will be set.
+This is so that the function can be called from code not holding
+the GIL.
+
+::
+
+ npy_intp
+ NpyIter_GetIterSize(NpyIter *iter)
+
+Gets the number of elements being iterated
+
+::
+
+ void
+ NpyIter_GetIterIndexRange(NpyIter *iter, npy_intp *istart, npy_intp
+ *iend)
+
+Gets the range of iteration indices being iterated
+
+::
+
+ npy_intp
+ NpyIter_GetIterIndex(NpyIter *iter)
+
+Gets the current iteration index
+
+::
+
+ int
+ NpyIter_GotoIterIndex(NpyIter *iter, npy_intp iterindex)
+
+Sets the iterator position to the specified iterindex,
+which matches the iteration order of the iterator.
+
+Returns NPY_SUCCEED on success, NPY_FAIL on failure.
+
+::
+
+ npy_bool
+ NpyIter_HasMultiIndex(NpyIter *iter)
+
+Whether the iterator is tracking a multi-index
+
+::
+
+ int
+ NpyIter_GetShape(NpyIter *iter, npy_intp *outshape)
+
+Gets the broadcast shape if a multi-index is being tracked by the iterator,
+otherwise gets the shape of the iteration as Fortran-order
+(fastest-changing index first).
+
+The reason Fortran-order is returned when a multi-index
+is not enabled is that this is providing a direct view into how
+the iterator traverses the n-dimensional space. The iterator organizes
+its memory from fastest index to slowest index, and when
+a multi-index is enabled, it uses a permutation to recover the original
+order.
+
+Returns NPY_SUCCEED or NPY_FAIL.
+
+::
+
+ NpyIter_GetMultiIndexFunc *
+ NpyIter_GetGetMultiIndex(NpyIter *iter, char **errmsg)
+
+Compute a specialized get_multi_index function for the iterator
+
+If errmsg is non-NULL, it should point to a variable which will
+receive the error message, and no Python exception will be set.
+This is so that the function can be called from code not holding
+the GIL.
+
+::
+
+ int
+ NpyIter_GotoMultiIndex(NpyIter *iter, npy_intp *multi_index)
+
+Sets the iterator to the specified multi-index, which must have the
+correct number of entries for 'ndim'. It is only valid
+when NPY_ITER_MULTI_INDEX was passed to the constructor. This operation
+fails if the multi-index is out of bounds.
+
+Returns NPY_SUCCEED on success, NPY_FAIL on failure.
+
+::
+
+ int
+ NpyIter_RemoveMultiIndex(NpyIter *iter)
+
+Removes multi-index support from an iterator.
+
+Returns NPY_SUCCEED or NPY_FAIL.
+
+::
+
+ npy_bool
+ NpyIter_HasIndex(NpyIter *iter)
+
+Whether the iterator is tracking an index
+
+::
+
+ npy_bool
+ NpyIter_IsBuffered(NpyIter *iter)
+
+Whether the iterator is buffered
+
+::
+
+ npy_bool
+ NpyIter_IsGrowInner(NpyIter *iter)
+
+Whether the inner loop can grow if buffering is unneeded
+
+::
+
+ npy_intp
+ NpyIter_GetBufferSize(NpyIter *iter)
+
+Gets the size of the buffer, or 0 if buffering is not enabled
+
+::
+
+ npy_intp *
+ NpyIter_GetIndexPtr(NpyIter *iter)
+
+Get a pointer to the index, if it is being tracked
+
+::
+
+ int
+ NpyIter_GotoIndex(NpyIter *iter, npy_intp flat_index)
+
+If the iterator is tracking an index, sets the iterator
+to the specified index.
+
+Returns NPY_SUCCEED on success, NPY_FAIL on failure.
+
+::
+
+ char **
+ NpyIter_GetDataPtrArray(NpyIter *iter)
+
+Get the array of data pointers (1 per object being iterated)
+
+This function may be safely called without holding the Python GIL.
+
+::
+
+ PyArray_Descr **
+ NpyIter_GetDescrArray(NpyIter *iter)
+
+Get the array of data type pointers (1 per object being iterated)
+
+::
+
+ PyArrayObject **
+ NpyIter_GetOperandArray(NpyIter *iter)
+
+Get the array of objects being iterated
+
+::
+
+ PyArrayObject *
+ NpyIter_GetIterView(NpyIter *iter, npy_intp i)
+
+Returns a view to the i-th object with the iterator's internal axes
+
+::
+
+ void
+ NpyIter_GetReadFlags(NpyIter *iter, char *outreadflags)
+
+Gets an array of read flags (1 per object being iterated)
+
+::
+
+ void
+ NpyIter_GetWriteFlags(NpyIter *iter, char *outwriteflags)
+
+Gets an array of write flags (1 per object being iterated)
+
+::
+
+ void
+ NpyIter_DebugPrint(NpyIter *iter)
+
+For debugging
+
+::
+
+ npy_bool
+ NpyIter_IterationNeedsAPI(NpyIter *iter)
+
+Whether the iteration loop, and in particular the iternext()
+function, needs API access. If this is true, the GIL must
+be retained while iterating.
+
+::
+
+ void
+ NpyIter_GetInnerFixedStrideArray(NpyIter *iter, npy_intp *out_strides)
+
+Get an array of strides which are fixed. Any strides which may
+change during iteration receive the value NPY_MAX_INTP. Once
+the iterator is ready to iterate, call this to get the strides
+which will always be fixed in the inner loop, then choose optimized
+inner loop functions which take advantage of those fixed strides.
+
+This function may be safely called without holding the Python GIL.
+
+::
+
+ int
+ NpyIter_RemoveAxis(NpyIter *iter, int axis)
+
+Removes an axis from iteration. This requires that NPY_ITER_MULTI_INDEX
+was set for iterator creation, and does not work if buffering is
+enabled. This function also resets the iterator to its initial state.
+
+Returns NPY_SUCCEED or NPY_FAIL.
+
+::
+
+ npy_intp *
+ NpyIter_GetAxisStrideArray(NpyIter *iter, int axis)
+
+Gets the array of strides for the specified axis.
+If the iterator is tracking a multi-index, gets the strides
+for the axis specified, otherwise gets the strides for
+the iteration axis as Fortran order (fastest-changing axis first).
+
+Returns NULL if an error occurs.
+
+::
+
+ npy_bool
+ NpyIter_RequiresBuffering(NpyIter *iter)
+
+Whether buffering is required for the iteration, i.e. whether it could
+not be done with no buffering.
+
+::
+
+ char **
+ NpyIter_GetInitialDataPtrArray(NpyIter *iter)
+
+Get the array of data pointers (1 per object being iterated),
+directly into the arrays (never pointing to a buffer), for starting
+unbuffered iteration. This always returns the addresses for the
+iterator position as reset to iterator index 0.
+
+These pointers are different from the pointers accepted by
+NpyIter_ResetBasePointers, because the direction along some
+axes may have been reversed, requiring base offsets.
+
+This function may be safely called without holding the Python GIL.
+
+::
+
+ int
+ NpyIter_CreateCompatibleStrides(NpyIter *iter, npy_intp
+ itemsize, npy_intp *outstrides)
+
+Builds a set of strides which are the same as the strides of an
+output array created using the NPY_ITER_ALLOCATE flag, where NULL
+was passed for op_axes. This is for data packed contiguously,
+but not necessarily in C or Fortran order. This should be used
+together with NpyIter_GetShape and NpyIter_GetNDim.
+
+A use case for this function is to match the shape and layout of
+the iterator and tack on one or more dimensions. For example,
+in order to generate a vector per input value for a numerical gradient,
+you pass in ndim*itemsize for itemsize, then add another dimension to
+the end with size ndim and stride itemsize. To do the Hessian matrix,
+you do the same thing but add two dimensions, or take advantage of
+the symmetry and pack it into 1 dimension with a particular encoding.
+
+This function may only be called if the iterator is tracking a multi-index
+and if NPY_ITER_DONT_NEGATE_STRIDES was used to prevent an axis from
+being iterated in reverse order.
+
+If an array is created with this method, simply adding 'itemsize'
+for each iteration will traverse the new array matching the
+iterator.
+
+Returns NPY_SUCCEED or NPY_FAIL.
+
+::
+
+ int
+ PyArray_CastingConverter(PyObject *obj, NPY_CASTING *casting)
+
+Convert any Python object, *obj*, to an NPY_CASTING enum.
+
+::
+
+ npy_intp
+ PyArray_CountNonzero(PyArrayObject *self)
+
+Counts the number of non-zero elements in the array.
+
+Returns -1 on error.
+
+::
+
+ PyArray_Descr *
+ PyArray_PromoteTypes(PyArray_Descr *type1, PyArray_Descr *type2)
+
+Produces the smallest size and lowest kind type to which both
+input types can be cast.
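+
+For instance (an editorial sketch), int32 promoted with float32 gives
+float64, since float32 cannot represent every int32 exactly:
+
+::
+
+    PyArray_Descr *a = PyArray_DescrFromType(NPY_INT32);
+    PyArray_Descr *b = PyArray_DescrFromType(NPY_FLOAT32);
+    PyArray_Descr *res = PyArray_PromoteTypes(a, b);  /* float64 descr */
+
+    Py_DECREF(a);
+    Py_DECREF(b);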
+
+::
+
+ PyArray_Descr *
+ PyArray_MinScalarType(PyArrayObject *arr)
+
+If arr is a scalar (has 0 dimensions) with a built-in number data type,
+finds the smallest type size/kind which can still represent its data.
+Otherwise, returns the array's data type.
+
+
+::
+
+ PyArray_Descr *
+ PyArray_ResultType(npy_intp narrs, PyArrayObject **arr, npy_intp
+ ndtypes, PyArray_Descr **dtypes)
+
+Produces the result type of a bunch of inputs, using the UFunc
+type promotion rules. Use this function when you have a set of
+input arrays, and need to determine an output array dtype.
+
+If all the inputs are scalars (have 0 dimensions) or the maximum "kind"
+of the scalars is greater than the maximum "kind" of the arrays, does
+a regular type promotion.
+
+Otherwise, does a type promotion on the MinScalarType
+of all the inputs. Data types passed directly are treated as array
+types.
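+
+A sketch (editorial addition; 'arr_a' and 'arr_b' are hypothetical
+inputs) of choosing an output dtype for a binary operation:
+
+::
+
+    PyArrayObject *ops[2] = {arr_a, arr_b};
+    PyArray_Descr *out_dtype = PyArray_ResultType(2, ops, 0, NULL);
+
+    if (out_dtype == NULL) {
+        return NULL;
+    }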
+
+
+::
+
+ npy_bool
+ PyArray_CanCastArrayTo(PyArrayObject *arr, PyArray_Descr
+ *to, NPY_CASTING casting)
+
+Returns 1 if the array object may be cast to the given data type using
+the casting rule, 0 otherwise. This differs from PyArray_CanCastTo in
+that it handles scalar arrays (0 dimensions) specially, by checking
+their value.
+
+::
+
+ npy_bool
+ PyArray_CanCastTypeTo(PyArray_Descr *from, PyArray_Descr
+ *to, NPY_CASTING casting)
+
+Returns true if data of type 'from' may be cast to data of type
+'to' according to the rule 'casting'.
+
+::
+
+ PyArrayObject *
+ PyArray_EinsteinSum(char *subscripts, npy_intp nop, PyArrayObject
+ **op_in, PyArray_Descr *dtype, NPY_ORDER
+ order, NPY_CASTING casting, PyArrayObject *out)
+
+This function provides summation of array elements according to
+the Einstein summation convention. For example:
+- trace(a) -> einsum("ii", a)
+- transpose(a) -> einsum("ji", a)
+- multiply(a,b) -> einsum(",", a, b)
+- inner(a,b) -> einsum("i,i", a, b)
+- outer(a,b) -> einsum("i,j", a, b)
+- matvec(a,b) -> einsum("ij,j", a, b)
+- matmat(a,b) -> einsum("ij,jk", a, b)
+
+subscripts: The string of subscripts for Einstein summation.
+nop: The number of operands
+op_in: The array of operands
+dtype: Either NULL, or the data type to force the calculation as.
+order: The order for the calculation/the output axes.
+casting: What kind of casts should be permitted.
+out: Either NULL, or an array into which the output should be placed.
+
+By default, the labels get placed in alphabetical order
+at the end of the output. So, if c = einsum("i,j", a, b)
+then c[i,j] == a[i]*b[j], but if c = einsum("j,i", a, b)
+then c[i,j] == a[j]*b[i].
+
+Alternatively, you can control the output order or prevent
+an axis from being summed/force an axis to be summed by providing
+indices for the output. This allows us to turn 'trace' into
+'diag', for example.
+- diag(a) -> einsum("ii->i", a)
+- sum(a, axis=0) -> einsum("i...->", a)
+
+Subscripts at the beginning and end may be specified by
+putting an ellipsis "..." in the middle. For example,
+the function einsum("i...i", a) takes the diagonal of
+the first and last dimensions of the operand, and
+einsum("ij...,jk...->ik...") takes the matrix product using
+the first two indices of each operand instead of the last two.
+
+When there is only one operand, no axes being summed, and
+no output parameter, this function returns a view
+into the operand instead of making a copy.
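+
+For example (an editorial sketch; 'a' and 'b' are hypothetical 2-d
+operands), the matrix product matmat(a, b):
+
+::
+
+    PyArrayObject *ops[2] = {a, b};
+    PyArrayObject *c = PyArray_EinsteinSum("ij,jk->ik", 2, ops,
+                                           NULL, NPY_KEEPORDER,
+                                           NPY_SAFE_CASTING, NULL);
+
+    if (c == NULL) {
+        return NULL;
+    }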
+
+::
+
+ PyObject *
+ PyArray_NewLikeArray(PyArrayObject *prototype, NPY_ORDER
+ order, PyArray_Descr *dtype, int subok)
+
+Creates a new array with the same shape as the provided one,
+with possible memory layout order and data type changes.
+
+prototype - The array the new one should be like.
+order - NPY_CORDER - C-contiguous result.
+NPY_FORTRANORDER - Fortran-contiguous result.
+NPY_ANYORDER - Fortran if prototype is Fortran, C otherwise.
+NPY_KEEPORDER - Keeps the axis ordering of prototype.
+dtype - If not NULL, overrides the data type of the result.
+subok - If 1, use the prototype's array subtype, otherwise
+always create a base-class array.
+
+NOTE: If dtype is not NULL, steals the dtype reference. On failure or when
+dtype->subarray is true, dtype will be decrefed.
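+
+For example (an editorial sketch; 'prototype' is a hypothetical input),
+allocating a float64 result shaped like an existing array -- note that
+the descr reference is consumed either way:
+
+::
+
+    PyObject *out = PyArray_NewLikeArray(prototype, NPY_KEEPORDER,
+                                         PyArray_DescrFromType(NPY_DOUBLE),
+                                         0);
+
+    if (out == NULL) {
+        return NULL;  /* the dtype reference was still consumed */
+    }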
+
+::
+
+ int
+ PyArray_GetArrayParamsFromObject(PyObject *op, PyArray_Descr
+ *requested_dtype, npy_bool
+ writeable, PyArray_Descr
+ **out_dtype, int *out_ndim, npy_intp
+ *out_dims, PyArrayObject
+ **out_arr, PyObject *context)
+
+Retrieves the array parameters for viewing/converting an arbitrary
+PyObject* to a NumPy array. This allows the "innate type and shape"
+of Python list-of-lists to be discovered without
+actually converting to an array.
+
+In some cases, such as structured arrays and the __array__ interface,
+a data type needs to be used to make sense of the object. When
+this is needed, provide a Descr for 'requested_dtype', otherwise
+provide NULL. This reference is not stolen. Also, if the requested
+dtype doesn't modify the interpretation of the input, out_dtype will
+still get the "innate" dtype of the object, not the dtype passed
+in 'requested_dtype'.
+
+If writing to the value in 'op' is desired, set the boolean
+'writeable' to 1. This raises an error when 'op' is a scalar, list
+of lists, or other non-writeable 'op'.
+
+Result: on success (return value 0), either out_arr is filled with a
+non-NULL PyArrayObject and the rest of the parameters are untouched,
+or out_arr is filled with NULL and the rest of the parameters are
+filled in.
+
+Typical usage:
+
+    PyArrayObject *arr = NULL;
+    PyArray_Descr *dtype = NULL;
+    int ndim = 0;
+    npy_intp dims[NPY_MAXDIMS];
+
+    if (PyArray_GetArrayParamsFromObject(op, NULL, 1, &dtype,
+                                         &ndim, dims, &arr, NULL) < 0) {
+        return NULL;
+    }
+    if (arr == NULL) {
+        ... validate/change dtype, validate flags, ndim, etc ...
+        // Could make custom strides here too
+        arr = PyArray_NewFromDescr(&PyArray_Type, dtype, ndim,
+                                   dims, NULL,
+                                   is_f_order ? NPY_ARRAY_F_CONTIGUOUS : 0,
+                                   NULL);
+        if (arr == NULL) {
+            return NULL;
+        }
+        if (PyArray_CopyObject(arr, op) < 0) {
+            Py_DECREF(arr);
+            return NULL;
+        }
+    }
+    else {
+        ... in this case the other parameters weren't filled, just
+            validate and possibly copy arr itself ...
+    }
+    ... use arr ...
+
+::
+
+ int
+ PyArray_ConvertClipmodeSequence(PyObject *object, NPY_CLIPMODE
+ *modes, int n)
+
+Convert an object to an array of n NPY_CLIPMODE values.
+This is intended to be used in functions where a different mode
+could be applied to each axis, like in ravel_multi_index.
+
+::
+
+ PyObject *
+ PyArray_MatrixProduct2(PyObject *op1, PyObject
+ *op2, PyArrayObject*out)
+
+Numeric.matrixproduct2(a, v, out):
+just like the inner product, but does the swapaxes handling on the fly.
+
+::
+
+ npy_bool
+ NpyIter_IsFirstVisit(NpyIter *iter, int iop)
+
+Checks whether the elements of the specified reduction operand which
+the iterator currently points at are being seen for the first time.
+The function returns a reasonable answer for reduction operands and
+when buffering is disabled. The answer may be incorrect for buffered
+non-reduction operands.
+
+This function is intended to be used in EXTERNAL_LOOP mode only,
+and will produce some wrong answers when that mode is not enabled.
+
+If this function returns true, the caller should also
+check the inner loop stride of the operand, because if
+that stride is 0, then only the first element of the innermost
+external loop is being visited for the first time.
+
+WARNING: For performance reasons, 'iop' is not bounds-checked,
+it is not confirmed that 'iop' is actually a reduction
+operand, and it is not confirmed that EXTERNAL_LOOP
+mode is enabled. These checks are the responsibility of
+the caller, and should be done outside of any inner loops.
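+
+A sketch (editorial addition) of the double check described above:
+
+::
+
+    if (NpyIter_IsFirstVisit(iter, iop)) {
+        npy_intp stride = NpyIter_GetInnerStrideArray(iter)[iop];
+        if (stride == 0) {
+            /* only element 0 of this inner loop is new */
+        }
+        else {
+            /* every element of this inner loop is new */
+        }
+    }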
+
+::
+
+ int
+ PyArray_SetBaseObject(PyArrayObject *arr, PyObject *obj)
+
+Sets the 'base' attribute of the array. This steals a reference
+to 'obj'.
+
+Returns 0 on success, -1 on failure.
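+
+A common use (an editorial sketch; 'dims', 'buffer' and 'owner' are
+hypothetical) is keeping the owner of a foreign buffer alive for the
+lifetime of the array wrapping it:
+
+::
+
+    PyObject *arr = PyArray_SimpleNewFromData(1, dims, NPY_DOUBLE, buffer);
+
+    if (arr == NULL) {
+        return NULL;
+    }
+    Py_INCREF(owner);  /* PyArray_SetBaseObject steals this reference */
+    if (PyArray_SetBaseObject((PyArrayObject *)arr, owner) < 0) {
+        Py_DECREF(arr);
+        return NULL;
+    }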
+
+::
+
+ void
+ PyArray_CreateSortedStridePerm(int ndim, npy_intp
+ *strides, npy_stride_sort_item
+ *out_strideperm)
+
+
+This function populates the first ndim elements of out_strideperm with
+the strides sorted in descending order by their absolute values.
+For example, the stride array (4, -2, 12) becomes
+[(2, 12), (0, 4), (1, -2)].
+
+::
+
+ void
+ PyArray_RemoveAxesInPlace(PyArrayObject *arr, npy_bool *flags)
+
+
+Removes the axes flagged as True from the array,
+modifying it in place. If an axis flagged for removal
+has a shape entry bigger than one, this effectively selects
+index zero for that axis.
+
+WARNING: If an axis flagged for removal has a shape equal to zero,
+the array will point to invalid memory. The caller must
+validate this!
+If an axis flagged for removal has a shape larger than one,
+the aligned flag (and in the future the contiguous flags),
+may need explicit update.
+(check also NPY_RELAXED_STRIDES_CHECKING)
+
+For example, this can be used to remove the reduction axes
+from a reduction result once its computation is complete.
+
+::
+
+ void
+ PyArray_DebugPrint(PyArrayObject *obj)
+
+Prints the raw data of the ndarray in a form useful for debugging
+low-level C issues.
+
+::
+
+ int
+ PyArray_FailUnlessWriteable(PyArrayObject *obj, const char *name)
+
+
+This function does nothing if obj is writeable, and raises an exception
+(and returns -1) if obj is not writeable. It may also do other
+house-keeping, such as issuing warnings on arrays which are transitioning
+to become views. Always call this function at some point before writing to
+an array.
+
+'name' is a name for the array, used to give better error
+messages. Something like "assignment destination", "output array", or even
+just "array".
+
+::
+
+ int
+ PyArray_SetUpdateIfCopyBase(PyArrayObject *arr, PyArrayObject *base)
+
+
+Precondition: 'arr' is a copy of 'base' (though possibly with different
+strides, ordering, etc.). This function sets the UPDATEIFCOPY flag and the
+->base pointer on 'arr', so that when 'arr' is destructed, it will copy any
+changes back to 'base'. DEPRECATED, use PyArray_SetWritebackIfCopyBase
+
+Steals a reference to 'base'.
+
+Returns 0 on success, -1 on failure.
+
+::
+
+ void *
+ PyDataMem_NEW(size_t size)
+
+Allocates memory for array data.
+
+::
+
+ void
+ PyDataMem_FREE(void *ptr)
+
+Free memory for array data.
+
+::
+
+ void *
+ PyDataMem_RENEW(void *ptr, size_t size)
+
+Reallocate/resize memory for array data.
+
+::
+
+ PyDataMem_EventHookFunc *
+ PyDataMem_SetEventHook(PyDataMem_EventHookFunc *newhook, void
+ *user_data, void **old_data)
+
+Sets the allocation event hook for numpy array data.
+Takes a PyDataMem_EventHookFunc *, which has the signature:
+void hook(void *old, void *new, size_t size, void *user_data).
+Also takes a void *user_data, and void **old_data.
+
+Returns a pointer to the previous hook or NULL. If old_data is
+non-NULL, the previous user_data pointer will be copied to it.
+
+If not NULL, hook will be called at the end of each PyDataMem_NEW/FREE/RENEW:
+result = PyDataMem_NEW(size) -> (*hook)(NULL, result, size, user_data)
+PyDataMem_FREE(ptr) -> (*hook)(ptr, NULL, 0, user_data)
+result = PyDataMem_RENEW(ptr, size) -> (*hook)(ptr, result, size, user_data)
+
+When the hook is called, the GIL will be held by the calling
+thread. The hook should be written to be reentrant if it performs
+operations that might cause new allocation events (such as the
+creation/destruction of numpy objects, or creating/destroying Python
+objects which might trigger a gc).
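+
+A sketch (editorial addition) of installing a simple logging hook:
+
+::
+
+    static void
+    log_hook(void *old, void *new, size_t size, void *user_data)
+    {
+        fprintf(stderr, "data alloc: %p -> %p (%zu bytes)\n",
+                old, new, size);
+    }
+
+    /* install it, remembering the previous hook and its user data */
+    void *old_data = NULL;
+    PyDataMem_EventHookFunc *old_hook =
+        PyDataMem_SetEventHook(log_hook, NULL, &old_data);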
+
+::
+
+ void
+ PyArray_MapIterSwapAxes(PyArrayMapIterObject *mit, PyArrayObject
+ **ret, int getmap)
+
+
+::
+
+ PyObject *
+ PyArray_MapIterArray(PyArrayObject *a, PyObject *index)
+
+
+Use advanced indexing to iterate an array.
+
+::
+
+ void
+ PyArray_MapIterNext(PyArrayMapIterObject *mit)
+
+This function updates the state of the map iterator
+and points mit->dataptr to the memory location of the next object.
+
+Note that this function never handles an extra operand but provides
+compatibility for an old (exposed) API.
+
+::
+
+ int
+ PyArray_Partition(PyArrayObject *op, PyArrayObject *ktharray, int
+ axis, NPY_SELECTKIND which)
+
+Partition an array in-place
+
+::
+
+ PyObject *
+ PyArray_ArgPartition(PyArrayObject *op, PyArrayObject *ktharray, int
+ axis, NPY_SELECTKIND which)
+
+ArgPartition an array
+
+::
+
+ int
+ PyArray_SelectkindConverter(PyObject *obj, NPY_SELECTKIND *selectkind)
+
+Convert object to select kind
+
+::
+
+ void *
+ PyDataMem_NEW_ZEROED(size_t size, size_t elsize)
+
+Allocates zeroed memory for array data.
+
+::
+
+ int
+ PyArray_CheckAnyScalarExact(PyObject *obj)
+
+Returns true if an object is exactly a numpy scalar.
+
+::
+
+ PyObject *
+ PyArray_MapIterArrayCopyIfOverlap(PyArrayObject *a, PyObject
+ *index, int
+ copy_if_overlap, PyArrayObject
+ *extra_op)
+
+
+Same as PyArray_MapIterArray, but:
+
+If copy_if_overlap != 0, check if `a` has memory overlap with any of the
+arrays in `index` and with `extra_op`. If yes, make copies as appropriate
+to avoid problems if `a` is modified during the iteration.
+`iter->array` may contain a copied array (UPDATEIFCOPY/WRITEBACKIFCOPY set).
+
+::
+
+ int
+ PyArray_ResolveWritebackIfCopy(PyArrayObject *self)
+
+
+If WRITEBACKIFCOPY is set and self has data, reset the base WRITEABLE
+flag, copy the local data to base, release the local data, and set
+flags appropriately. Returns 0 if not relevant, 1 on success, < 0 on
+failure.
+
+::
+
+ int
+ PyArray_SetWritebackIfCopyBase(PyArrayObject *arr, PyArrayObject
+ *base)
+
+
+Precondition: 'arr' is a copy of 'base' (though possibly with different
+strides, ordering, etc.). This function sets the WRITEBACKIFCOPY flag and the
+->base pointer on 'arr', call PyArray_ResolveWritebackIfCopy to copy any
+changes back to 'base' before deallocating the array.
+
+Steals a reference to 'base'.
+
+Returns 0 on success, -1 on failure.
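+
+The full write-back life cycle might look like this (an editorial
+sketch; 'copy' is a hypothetical behaved copy of 'base'):
+
+::
+
+    Py_INCREF(base);  /* PyArray_SetWritebackIfCopyBase steals this */
+    if (PyArray_SetWritebackIfCopyBase(copy, base) < 0) {
+        Py_DECREF(copy);
+        return NULL;
+    }
+    /* ... modify the data of 'copy' ... */
+    PyArray_ResolveWritebackIfCopy(copy);  /* writes back into 'base' */
+    Py_DECREF(copy);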
+
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/ndarrayobject.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/ndarrayobject.h
new file mode 100644
index 0000000..45f008b
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/ndarrayobject.h
@@ -0,0 +1,285 @@
+/*
+ * DON'T INCLUDE THIS DIRECTLY.
+ */
+
+#ifndef NPY_NDARRAYOBJECT_H
+#define NPY_NDARRAYOBJECT_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <Python.h>
+#include "ndarraytypes.h"
+
+/* Includes the "function" C-API -- these are all stored in a
+ list of pointers --- one for each file
+ The two lists are concatenated into one in multiarray.
+
+ They are available as import_array()
+*/
+
+#include "__multiarray_api.h"
+
+
+/* C-API that requires previous API to be defined */
+
+#define PyArray_DescrCheck(op) (((PyObject*)(op))->ob_type==&PyArrayDescr_Type)
+
+#define PyArray_Check(op) PyObject_TypeCheck(op, &PyArray_Type)
+#define PyArray_CheckExact(op) (((PyObject*)(op))->ob_type == &PyArray_Type)
+
+#define PyArray_HasArrayInterfaceType(op, type, context, out) \
+ ((((out)=PyArray_FromStructInterface(op)) != Py_NotImplemented) || \
+ (((out)=PyArray_FromInterface(op)) != Py_NotImplemented) || \
+ (((out)=PyArray_FromArrayAttr(op, type, context)) != \
+ Py_NotImplemented))
+
+#define PyArray_HasArrayInterface(op, out) \
+ PyArray_HasArrayInterfaceType(op, NULL, NULL, out)
+
+#define PyArray_IsZeroDim(op) (PyArray_Check(op) && \
+ (PyArray_NDIM((PyArrayObject *)op) == 0))
+
+#define PyArray_IsScalar(obj, cls) \
+ (PyObject_TypeCheck(obj, &Py##cls##ArrType_Type))
+
+#define PyArray_CheckScalar(m) (PyArray_IsScalar(m, Generic) || \
+ PyArray_IsZeroDim(m))
+#if PY_MAJOR_VERSION >= 3
+#define PyArray_IsPythonNumber(obj) \
+ (PyFloat_Check(obj) || PyComplex_Check(obj) || \
+ PyLong_Check(obj) || PyBool_Check(obj))
+#define PyArray_IsIntegerScalar(obj) (PyLong_Check(obj) \
+ || PyArray_IsScalar((obj), Integer))
+#define PyArray_IsPythonScalar(obj) \
+ (PyArray_IsPythonNumber(obj) || PyBytes_Check(obj) || \
+ PyUnicode_Check(obj))
+#else
+#define PyArray_IsPythonNumber(obj) \
+ (PyInt_Check(obj) || PyFloat_Check(obj) || PyComplex_Check(obj) || \
+ PyLong_Check(obj) || PyBool_Check(obj))
+#define PyArray_IsIntegerScalar(obj) (PyInt_Check(obj) \
+ || PyLong_Check(obj) \
+ || PyArray_IsScalar((obj), Integer))
+#define PyArray_IsPythonScalar(obj) \
+ (PyArray_IsPythonNumber(obj) || PyString_Check(obj) || \
+ PyUnicode_Check(obj))
+#endif
+
+#define PyArray_IsAnyScalar(obj) \
+ (PyArray_IsScalar(obj, Generic) || PyArray_IsPythonScalar(obj))
+
+#define PyArray_CheckAnyScalar(obj) (PyArray_IsPythonScalar(obj) || \
+ PyArray_CheckScalar(obj))
+
+
+#define PyArray_GETCONTIGUOUS(m) (PyArray_ISCONTIGUOUS(m) ? \
+ Py_INCREF(m), (m) : \
+ (PyArrayObject *)(PyArray_Copy(m)))
+
+#define PyArray_SAMESHAPE(a1,a2) ((PyArray_NDIM(a1) == PyArray_NDIM(a2)) && \
+ PyArray_CompareLists(PyArray_DIMS(a1), \
+ PyArray_DIMS(a2), \
+ PyArray_NDIM(a1)))
+
+#define PyArray_SIZE(m) PyArray_MultiplyList(PyArray_DIMS(m), PyArray_NDIM(m))
+#define PyArray_NBYTES(m) (PyArray_ITEMSIZE(m) * PyArray_SIZE(m))
+#define PyArray_FROM_O(m) PyArray_FromAny(m, NULL, 0, 0, 0, NULL)
+
+#define PyArray_FROM_OF(m,flags) PyArray_CheckFromAny(m, NULL, 0, 0, flags, \
+ NULL)
+
+#define PyArray_FROM_OT(m,type) PyArray_FromAny(m, \
+ PyArray_DescrFromType(type), 0, 0, 0, NULL)
+
+#define PyArray_FROM_OTF(m, type, flags) \
+ PyArray_FromAny(m, PyArray_DescrFromType(type), 0, 0, \
+ (((flags) & NPY_ARRAY_ENSURECOPY) ? \
+ ((flags) | NPY_ARRAY_DEFAULT) : (flags)), NULL)
+
+#define PyArray_FROMANY(m, type, min, max, flags) \
+ PyArray_FromAny(m, PyArray_DescrFromType(type), min, max, \
+ (((flags) & NPY_ARRAY_ENSURECOPY) ? \
+ (flags) | NPY_ARRAY_DEFAULT : (flags)), NULL)
+
+#define PyArray_ZEROS(m, dims, type, is_f_order) \
+ PyArray_Zeros(m, dims, PyArray_DescrFromType(type), is_f_order)
+
+#define PyArray_EMPTY(m, dims, type, is_f_order) \
+ PyArray_Empty(m, dims, PyArray_DescrFromType(type), is_f_order)
+
+#define PyArray_FILLWBYTE(obj, val) memset(PyArray_DATA(obj), val, \
+ PyArray_NBYTES(obj))
+#ifndef PYPY_VERSION
+#define PyArray_REFCOUNT(obj) (((PyObject *)(obj))->ob_refcnt)
+#define NPY_REFCOUNT PyArray_REFCOUNT
+#endif
+#define NPY_MAX_ELSIZE (2 * NPY_SIZEOF_LONGDOUBLE)
+
+#define PyArray_ContiguousFromAny(op, type, min_depth, max_depth) \
+ PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \
+ max_depth, NPY_ARRAY_DEFAULT, NULL)
+
+#define PyArray_EquivArrTypes(a1, a2) \
+ PyArray_EquivTypes(PyArray_DESCR(a1), PyArray_DESCR(a2))
+
+#define PyArray_EquivByteorders(b1, b2) \
+ (((b1) == (b2)) || (PyArray_ISNBO(b1) == PyArray_ISNBO(b2)))
+
+#define PyArray_SimpleNew(nd, dims, typenum) \
+ PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, NULL, 0, 0, NULL)
+
+#define PyArray_SimpleNewFromData(nd, dims, typenum, data) \
+ PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, \
+ data, 0, NPY_ARRAY_CARRAY, NULL)
+
+#define PyArray_SimpleNewFromDescr(nd, dims, descr) \
+ PyArray_NewFromDescr(&PyArray_Type, descr, nd, dims, \
+ NULL, NULL, 0, NULL)
+
+#define PyArray_ToScalar(data, arr) \
+ PyArray_Scalar(data, PyArray_DESCR(arr), (PyObject *)arr)
+
+
+/* These might be faster without the dereferencing of obj
+ going on inside -- of course an optimizing compiler should
+ inline the constants inside a for loop making it a moot point
+*/
+
+#define PyArray_GETPTR1(obj, i) ((void *)(PyArray_BYTES(obj) + \
+ (i)*PyArray_STRIDES(obj)[0]))
+
+#define PyArray_GETPTR2(obj, i, j) ((void *)(PyArray_BYTES(obj) + \
+ (i)*PyArray_STRIDES(obj)[0] + \
+ (j)*PyArray_STRIDES(obj)[1]))
+
+#define PyArray_GETPTR3(obj, i, j, k) ((void *)(PyArray_BYTES(obj) + \
+ (i)*PyArray_STRIDES(obj)[0] + \
+ (j)*PyArray_STRIDES(obj)[1] + \
+ (k)*PyArray_STRIDES(obj)[2]))
+
+#define PyArray_GETPTR4(obj, i, j, k, l) ((void *)(PyArray_BYTES(obj) + \
+ (i)*PyArray_STRIDES(obj)[0] + \
+ (j)*PyArray_STRIDES(obj)[1] + \
+ (k)*PyArray_STRIDES(obj)[2] + \
+ (l)*PyArray_STRIDES(obj)[3]))
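+
+/*
+ * Editorial usage sketch (assumes 'obj' is a 2-d NPY_DOUBLE array):
+ *
+ *     double v = *(double *)PyArray_GETPTR2(obj, i, j);
+ *     *(double *)PyArray_GETPTR2(obj, i, j) = 2.0 * v;
+ */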
+
+/* Move to arrayobject.c once PyArray_XDECREF_ERR is removed */
+static NPY_INLINE void
+PyArray_DiscardWritebackIfCopy(PyArrayObject *arr)
+{
+ PyArrayObject_fields *fa = (PyArrayObject_fields *)arr;
+ if (fa && fa->base) {
+ if ((fa->flags & NPY_ARRAY_UPDATEIFCOPY) ||
+ (fa->flags & NPY_ARRAY_WRITEBACKIFCOPY)) {
+ PyArray_ENABLEFLAGS((PyArrayObject*)fa->base, NPY_ARRAY_WRITEABLE);
+ Py_DECREF(fa->base);
+ fa->base = NULL;
+ PyArray_CLEARFLAGS(arr, NPY_ARRAY_WRITEBACKIFCOPY);
+ PyArray_CLEARFLAGS(arr, NPY_ARRAY_UPDATEIFCOPY);
+ }
+ }
+}
+
+#define PyArray_DESCR_REPLACE(descr) do { \
+ PyArray_Descr *_new_; \
+ _new_ = PyArray_DescrNew(descr); \
+ Py_XDECREF(descr); \
+ descr = _new_; \
+ } while(0)
+
+/* Copy should always return contiguous array */
+#define PyArray_Copy(obj) PyArray_NewCopy(obj, NPY_CORDER)
+
+#define PyArray_FromObject(op, type, min_depth, max_depth) \
+ PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \
+ max_depth, NPY_ARRAY_BEHAVED | \
+ NPY_ARRAY_ENSUREARRAY, NULL)
+
+#define PyArray_ContiguousFromObject(op, type, min_depth, max_depth) \
+ PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \
+ max_depth, NPY_ARRAY_DEFAULT | \
+ NPY_ARRAY_ENSUREARRAY, NULL)
+
+#define PyArray_CopyFromObject(op, type, min_depth, max_depth) \
+ PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \
+ max_depth, NPY_ARRAY_ENSURECOPY | \
+ NPY_ARRAY_DEFAULT | \
+ NPY_ARRAY_ENSUREARRAY, NULL)
+
+#define PyArray_Cast(mp, type_num) \
+ PyArray_CastToType(mp, PyArray_DescrFromType(type_num), 0)
+
+#define PyArray_Take(ap, items, axis) \
+ PyArray_TakeFrom(ap, items, axis, NULL, NPY_RAISE)
+
+#define PyArray_Put(ap, items, values) \
+ PyArray_PutTo(ap, items, values, NPY_RAISE)
+
+/* Compatibility with old Numeric stuff -- don't use in new code */
+
+#define PyArray_FromDimsAndData(nd, d, type, data) \
+ PyArray_FromDimsAndDataAndDescr(nd, d, PyArray_DescrFromType(type), \
+ data)
+
+
+/*
+ Check to see if this key in the dictionary is the "title"
+ entry of the tuple (i.e. a duplicate dictionary entry in the fields
+ dict.
+*/
+
+static NPY_INLINE int
+NPY_TITLE_KEY_check(PyObject *key, PyObject *value)
+{
+ PyObject *title;
+ if (PyTuple_GET_SIZE(value) != 3) {
+ return 0;
+ }
+ title = PyTuple_GET_ITEM(value, 2);
+ if (key == title) {
+ return 1;
+ }
+#ifdef PYPY_VERSION
+ /*
+ * On PyPy, dictionary keys do not always preserve object identity.
+ * Fall back to comparison by value.
+ */
+ if (PyUnicode_Check(title) && PyUnicode_Check(key)) {
+ return PyUnicode_Compare(title, key) == 0 ? 1 : 0;
+ }
+#if PY_VERSION_HEX < 0x03000000
+ if (PyString_Check(title) && PyString_Check(key)) {
+ return PyObject_Compare(title, key) == 0 ? 1 : 0;
+ }
+#endif
+#endif
+ return 0;
+}
+
+/* Macro, for backward compat with "if NPY_TITLE_KEY(key, value) { ..." */
+#define NPY_TITLE_KEY(key, value) (NPY_TITLE_KEY_check((key), (value)))
+
+#define DEPRECATE(msg) PyErr_WarnEx(PyExc_DeprecationWarning,msg,1)
+#define DEPRECATE_FUTUREWARNING(msg) PyErr_WarnEx(PyExc_FutureWarning,msg,1)
+
+#if !defined(NPY_NO_DEPRECATED_API) || \
+ (NPY_NO_DEPRECATED_API < NPY_1_14_API_VERSION)
+static NPY_INLINE void
+PyArray_XDECREF_ERR(PyArrayObject *arr)
+{
+ /* 2017-Nov-10 1.14 */
+ DEPRECATE("PyArray_XDECREF_ERR is deprecated, call "
+ "PyArray_DiscardWritebackIfCopy then Py_XDECREF instead");
+ PyArray_DiscardWritebackIfCopy(arr);
+ Py_XDECREF(arr);
+}
+#endif
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* NPY_NDARRAYOBJECT_H */
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/ndarraytypes.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/ndarraytypes.h
new file mode 100644
index 0000000..b0b749c
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/ndarraytypes.h
@@ -0,0 +1,1838 @@
+#ifndef NDARRAYTYPES_H
+#define NDARRAYTYPES_H
+
+#include "npy_common.h"
+#include "npy_endian.h"
+#include "npy_cpu.h"
+#include "utils.h"
+
+#define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN
+
+/* Only use thread if configured in config and python supports it */
+#if defined WITH_THREAD && !NPY_NO_SMP
+ #define NPY_ALLOW_THREADS 1
+#else
+ #define NPY_ALLOW_THREADS 0
+#endif
+
+#ifndef __has_extension
+#define __has_extension(x) 0
+#endif
+
+#if !defined(_NPY_NO_DEPRECATIONS) && \
+ ((defined(__GNUC__)&& __GNUC__ >= 6) || \
+ __has_extension(attribute_deprecated_with_message))
+#define NPY_ATTR_DEPRECATE(text) __attribute__ ((deprecated (text)))
+#else
+#define NPY_ATTR_DEPRECATE(text)
+#endif
+
+/*
+ * There are several places in the code where an array of dimensions
+ * is allocated statically. This is the size of that static
+ * allocation.
+ *
+ * The array creation itself could have arbitrary dimensions but all
+ * the places where static allocation is used would need to be changed
+ * to dynamic (including inside of several structures)
+ */
+
+#define NPY_MAXDIMS 32
+#define NPY_MAXARGS 32
+
+/* Used for Converter Functions "O&" code in ParseTuple */
+#define NPY_FAIL 0
+#define NPY_SUCCEED 1
+
+/*
+ * Binary compatibility version number. This number is increased
+ * whenever the C-API is changed such that binary compatibility is
+ * broken, i.e. whenever a recompile of extension modules is needed.
+ */
+#define NPY_VERSION NPY_ABI_VERSION
+
+/*
+ * Minor API version. This number is increased whenever a change is
+ * made to the C-API -- whether it breaks binary compatibility or not.
+ * Some changes, such as adding a function pointer to the end of the
+ * function table, can be made without breaking binary compatibility.
+ * In this case, only the NPY_FEATURE_VERSION (*not* NPY_VERSION)
+ * would be increased. Whenever binary compatibility is broken, both
+ * NPY_VERSION and NPY_FEATURE_VERSION should be increased.
+ */
+#define NPY_FEATURE_VERSION NPY_API_VERSION
+
+enum NPY_TYPES { NPY_BOOL=0,
+ NPY_BYTE, NPY_UBYTE,
+ NPY_SHORT, NPY_USHORT,
+ NPY_INT, NPY_UINT,
+ NPY_LONG, NPY_ULONG,
+ NPY_LONGLONG, NPY_ULONGLONG,
+ NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE,
+ NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE,
+ NPY_OBJECT=17,
+ NPY_STRING, NPY_UNICODE,
+ NPY_VOID,
+ /*
+ * New 1.6 types appended, may be integrated
+ * into the above in 2.0.
+ */
+ NPY_DATETIME, NPY_TIMEDELTA, NPY_HALF,
+
+ NPY_NTYPES,
+ NPY_NOTYPE,
+ NPY_CHAR NPY_ATTR_DEPRECATE("Use NPY_STRING"),
+ NPY_USERDEF=256, /* leave room for characters */
+
+ /* The number of types not including the new 1.6 types */
+ NPY_NTYPES_ABI_COMPATIBLE=21
+};
+#ifdef _MSC_VER
+#pragma deprecated(NPY_CHAR)
+#endif
+
+/* basetype array priority */
+#define NPY_PRIORITY 0.0
+
+/* default subtype priority */
+#define NPY_SUBTYPE_PRIORITY 1.0
+
+/* default scalar priority */
+#define NPY_SCALAR_PRIORITY -1000000.0
+
+/* How many floating point types are there (excluding half) */
+#define NPY_NUM_FLOATTYPE 3
+
+/*
+ * These characters correspond to the array type and the struct
+ * module
+ */
+
+enum NPY_TYPECHAR {
+ NPY_BOOLLTR = '?',
+ NPY_BYTELTR = 'b',
+ NPY_UBYTELTR = 'B',
+ NPY_SHORTLTR = 'h',
+ NPY_USHORTLTR = 'H',
+ NPY_INTLTR = 'i',
+ NPY_UINTLTR = 'I',
+ NPY_LONGLTR = 'l',
+ NPY_ULONGLTR = 'L',
+ NPY_LONGLONGLTR = 'q',
+ NPY_ULONGLONGLTR = 'Q',
+ NPY_HALFLTR = 'e',
+ NPY_FLOATLTR = 'f',
+ NPY_DOUBLELTR = 'd',
+ NPY_LONGDOUBLELTR = 'g',
+ NPY_CFLOATLTR = 'F',
+ NPY_CDOUBLELTR = 'D',
+ NPY_CLONGDOUBLELTR = 'G',
+ NPY_OBJECTLTR = 'O',
+ NPY_STRINGLTR = 'S',
+ NPY_STRINGLTR2 = 'a',
+ NPY_UNICODELTR = 'U',
+ NPY_VOIDLTR = 'V',
+ NPY_DATETIMELTR = 'M',
+ NPY_TIMEDELTALTR = 'm',
+ NPY_CHARLTR = 'c',
+
+ /*
+     * No Descriptor, just a define -- this lets
+ * Python users specify an array of integers
+ * large enough to hold a pointer on the
+ * platform
+ */
+ NPY_INTPLTR = 'p',
+ NPY_UINTPLTR = 'P',
+
+ /*
+ * These are for dtype 'kinds', not dtype 'typecodes'
+ * as the above are for.
+ */
+ NPY_GENBOOLLTR ='b',
+ NPY_SIGNEDLTR = 'i',
+ NPY_UNSIGNEDLTR = 'u',
+ NPY_FLOATINGLTR = 'f',
+ NPY_COMPLEXLTR = 'c'
+};
+
+typedef enum {
+ NPY_QUICKSORT=0,
+ NPY_HEAPSORT=1,
+ NPY_MERGESORT=2
+} NPY_SORTKIND;
+#define NPY_NSORTS (NPY_MERGESORT + 1)
+
+
+typedef enum {
+ NPY_INTROSELECT=0
+} NPY_SELECTKIND;
+#define NPY_NSELECTS (NPY_INTROSELECT + 1)
+
+
+typedef enum {
+ NPY_SEARCHLEFT=0,
+ NPY_SEARCHRIGHT=1
+} NPY_SEARCHSIDE;
+#define NPY_NSEARCHSIDES (NPY_SEARCHRIGHT + 1)
+
+
+typedef enum {
+ NPY_NOSCALAR=-1,
+ NPY_BOOL_SCALAR,
+ NPY_INTPOS_SCALAR,
+ NPY_INTNEG_SCALAR,
+ NPY_FLOAT_SCALAR,
+ NPY_COMPLEX_SCALAR,
+ NPY_OBJECT_SCALAR
+} NPY_SCALARKIND;
+#define NPY_NSCALARKINDS (NPY_OBJECT_SCALAR + 1)
+
+/* For specifying array memory layout or iteration order */
+typedef enum {
+ /* Fortran order if inputs are all Fortran, C otherwise */
+ NPY_ANYORDER=-1,
+ /* C order */
+ NPY_CORDER=0,
+ /* Fortran order */
+ NPY_FORTRANORDER=1,
+ /* An order as close to the inputs as possible */
+ NPY_KEEPORDER=2
+} NPY_ORDER;
+
+/* For specifying allowed casting in operations which support it */
+typedef enum {
+ /* Only allow identical types */
+ NPY_NO_CASTING=0,
+ /* Allow identical and byte swapped types */
+ NPY_EQUIV_CASTING=1,
+ /* Only allow safe casts */
+ NPY_SAFE_CASTING=2,
+ /* Allow safe casts or casts within the same kind */
+ NPY_SAME_KIND_CASTING=3,
+ /* Allow any casts */
+ NPY_UNSAFE_CASTING=4
+} NPY_CASTING;
+
+typedef enum {
+ NPY_CLIP=0,
+ NPY_WRAP=1,
+ NPY_RAISE=2
+} NPY_CLIPMODE;
+
+/* The special not-a-time (NaT) value */
+#define NPY_DATETIME_NAT NPY_MIN_INT64
+
+/*
+ * Upper bound on the length of a DATETIME ISO 8601 string
+ * YEAR: 21 (64-bit year)
+ * MONTH: 3
+ * DAY: 3
+ * HOURS: 3
+ * MINUTES: 3
+ * SECONDS: 3
+ * ATTOSECONDS: 1 + 3*6
+ * TIMEZONE: 5
+ * NULL TERMINATOR: 1
+ */
+#define NPY_DATETIME_MAX_ISO8601_STRLEN (21 + 3*5 + 1 + 3*6 + 6 + 1)
+
+/* The FR in the unit names stands for frequency */
+typedef enum {
+ /* Force signed enum type, must be -1 for code compatibility */
+ NPY_FR_ERROR = -1, /* error or undetermined */
+
+ /* Start of valid units */
+ NPY_FR_Y = 0, /* Years */
+ NPY_FR_M = 1, /* Months */
+ NPY_FR_W = 2, /* Weeks */
+ /* Gap where 1.6 NPY_FR_B (value 3) was */
+ NPY_FR_D = 4, /* Days */
+ NPY_FR_h = 5, /* hours */
+ NPY_FR_m = 6, /* minutes */
+ NPY_FR_s = 7, /* seconds */
+ NPY_FR_ms = 8, /* milliseconds */
+ NPY_FR_us = 9, /* microseconds */
+ NPY_FR_ns = 10, /* nanoseconds */
+ NPY_FR_ps = 11, /* picoseconds */
+ NPY_FR_fs = 12, /* femtoseconds */
+ NPY_FR_as = 13, /* attoseconds */
+ NPY_FR_GENERIC = 14 /* unbound units, can convert to anything */
+} NPY_DATETIMEUNIT;
+
+/*
+ * NOTE: With the NPY_FR_B gap for 1.6 ABI compatibility, NPY_DATETIME_NUMUNITS
+ * is technically one more than the actual number of units.
+ */
+#define NPY_DATETIME_NUMUNITS (NPY_FR_GENERIC + 1)
+#define NPY_DATETIME_DEFAULTUNIT NPY_FR_GENERIC
+
+/*
+ * Business day conventions for mapping invalid business
+ * days to valid business days.
+ */
+typedef enum {
+ /* Go forward in time to the following business day. */
+ NPY_BUSDAY_FORWARD,
+ NPY_BUSDAY_FOLLOWING = NPY_BUSDAY_FORWARD,
+ /* Go backward in time to the preceding business day. */
+ NPY_BUSDAY_BACKWARD,
+ NPY_BUSDAY_PRECEDING = NPY_BUSDAY_BACKWARD,
+ /*
+ * Go forward in time to the following business day, unless it
+ * crosses a month boundary, in which case go backward
+ */
+ NPY_BUSDAY_MODIFIEDFOLLOWING,
+ /*
+ * Go backward in time to the preceding business day, unless it
+ * crosses a month boundary, in which case go forward.
+ */
+ NPY_BUSDAY_MODIFIEDPRECEDING,
+ /* Produce a NaT for non-business days. */
+ NPY_BUSDAY_NAT,
+ /* Raise an exception for non-business days. */
+ NPY_BUSDAY_RAISE
+} NPY_BUSDAY_ROLL;
+
+/************************************************************
+ * NumPy Auxiliary Data for inner loops, sort functions, etc.
+ ************************************************************/
+
+/*
+ * When creating an auxiliary data struct, this should always appear
+ * as the first member, like this:
+ *
+ * typedef struct {
+ * NpyAuxData base;
+ * double constant;
+ * } constant_multiplier_aux_data;
+ */
+typedef struct NpyAuxData_tag NpyAuxData;
+
+/* Function pointers for freeing or cloning auxiliary data */
+typedef void (NpyAuxData_FreeFunc) (NpyAuxData *);
+typedef NpyAuxData *(NpyAuxData_CloneFunc) (NpyAuxData *);
+
+struct NpyAuxData_tag {
+ NpyAuxData_FreeFunc *free;
+ NpyAuxData_CloneFunc *clone;
+ /* To allow for a bit of expansion without breaking the ABI */
+ void *reserved[2];
+};
+
+/* Macros to use for freeing and cloning auxiliary data */
+#define NPY_AUXDATA_FREE(auxdata) \
+ do { \
+ if ((auxdata) != NULL) { \
+ (auxdata)->free(auxdata); \
+ } \
+ } while(0)
+#define NPY_AUXDATA_CLONE(auxdata) \
+ ((auxdata)->clone(auxdata))
+
+#define NPY_ERR(str) fprintf(stderr, #str); fflush(stderr);
+#define NPY_ERR2(str) fprintf(stderr, str); fflush(stderr);
+
+#define NPY_STRINGIFY(x) #x
+#define NPY_TOSTRING(x) NPY_STRINGIFY(x)
+
+ /*
+ * Macros to define how array, and dimension/strides data is
+ * allocated.
+ */
+
+ /* Data buffer - PyDataMem_NEW/FREE/RENEW are in multiarraymodule.c */
+
+#define NPY_USE_PYMEM 1
+
+#if NPY_USE_PYMEM == 1
+ /* numpy sometimes calls PyArray_malloc() with the GIL released. On Python
+ 3.3 and older, it was safe to call PyMem_Malloc() with the GIL released.
+ On Python 3.4 and newer, it's better to use PyMem_RawMalloc() to be able
+ to use tracemalloc. On Python 3.6, calling PyMem_Malloc() with the GIL
+ released is now a fatal error in debug mode. */
+# if PY_VERSION_HEX >= 0x03040000
+# define PyArray_malloc PyMem_RawMalloc
+# define PyArray_free PyMem_RawFree
+# define PyArray_realloc PyMem_RawRealloc
+# else
+# define PyArray_malloc PyMem_Malloc
+# define PyArray_free PyMem_Free
+# define PyArray_realloc PyMem_Realloc
+# endif
+#else
+#define PyArray_malloc malloc
+#define PyArray_free free
+#define PyArray_realloc realloc
+#endif
+
+/* Dimensions and strides */
+#define PyDimMem_NEW(size) \
+ ((npy_intp *)PyArray_malloc(size*sizeof(npy_intp)))
+
+#define PyDimMem_FREE(ptr) PyArray_free(ptr)
+
+#define PyDimMem_RENEW(ptr,size) \
+ ((npy_intp *)PyArray_realloc(ptr,size*sizeof(npy_intp)))
+
+/* forward declaration */
+struct _PyArray_Descr;
+
+/* These must deal with unaligned and swapped data if necessary */
+typedef PyObject * (PyArray_GetItemFunc) (void *, void *);
+typedef int (PyArray_SetItemFunc)(PyObject *, void *, void *);
+
+typedef void (PyArray_CopySwapNFunc)(void *, npy_intp, void *, npy_intp,
+ npy_intp, int, void *);
+
+typedef void (PyArray_CopySwapFunc)(void *, void *, int, void *);
+typedef npy_bool (PyArray_NonzeroFunc)(void *, void *);
+
+
+/*
+ * These assume aligned and notswapped data -- a buffer will be used
+ * before or contiguous data will be obtained
+ */
+
+typedef int (PyArray_CompareFunc)(const void *, const void *, void *);
+typedef int (PyArray_ArgFunc)(void*, npy_intp, npy_intp*, void *);
+
+typedef void (PyArray_DotFunc)(void *, npy_intp, void *, npy_intp, void *,
+ npy_intp, void *);
+
+typedef void (PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *,
+ void *);
+
+/*
+ * XXX the ignore argument should be removed next time the API version
+ * is bumped. It used to be the separator.
+ */
+typedef int (PyArray_ScanFunc)(FILE *fp, void *dptr,
+ char *ignore, struct _PyArray_Descr *);
+typedef int (PyArray_FromStrFunc)(char *s, void *dptr, char **endptr,
+ struct _PyArray_Descr *);
+
+typedef int (PyArray_FillFunc)(void *, npy_intp, void *);
+
+typedef int (PyArray_SortFunc)(void *, npy_intp, void *);
+typedef int (PyArray_ArgSortFunc)(void *, npy_intp *, npy_intp, void *);
+typedef int (PyArray_PartitionFunc)(void *, npy_intp, npy_intp,
+ npy_intp *, npy_intp *,
+ void *);
+typedef int (PyArray_ArgPartitionFunc)(void *, npy_intp *, npy_intp, npy_intp,
+ npy_intp *, npy_intp *,
+ void *);
+
+typedef int (PyArray_FillWithScalarFunc)(void *, npy_intp, void *, void *);
+
+typedef int (PyArray_ScalarKindFunc)(void *);
+
+typedef void (PyArray_FastClipFunc)(void *in, npy_intp n_in, void *min,
+ void *max, void *out);
+typedef void (PyArray_FastPutmaskFunc)(void *in, void *mask, npy_intp n_in,
+ void *values, npy_intp nv);
+typedef int (PyArray_FastTakeFunc)(void *dest, void *src, npy_intp *indarray,
+ npy_intp nindarray, npy_intp n_outer,
+ npy_intp m_middle, npy_intp nelem,
+ NPY_CLIPMODE clipmode);
+
+typedef struct {
+ npy_intp *ptr;
+ int len;
+} PyArray_Dims;
+
+typedef struct {
+ /*
+ * Functions to cast to most other standard types
+ * Can have some NULL entries. The types
+ * DATETIME, TIMEDELTA, and HALF go into the castdict
+ * even though they are built-in.
+ */
+ PyArray_VectorUnaryFunc *cast[NPY_NTYPES_ABI_COMPATIBLE];
+
+ /* The next four functions *cannot* be NULL */
+
+ /*
+ * Functions to get and set items with standard Python types
+ * -- not array scalars
+ */
+ PyArray_GetItemFunc *getitem;
+ PyArray_SetItemFunc *setitem;
+
+ /*
+ * Copy and/or swap data. Memory areas may not overlap
+ * Use memmove first if they might
+ */
+ PyArray_CopySwapNFunc *copyswapn;
+ PyArray_CopySwapFunc *copyswap;
+
+ /*
+ * Function to compare items
+ * Can be NULL
+ */
+ PyArray_CompareFunc *compare;
+
+ /*
+ * Function to select largest
+ * Can be NULL
+ */
+ PyArray_ArgFunc *argmax;
+
+ /*
+ * Function to compute dot product
+ * Can be NULL
+ */
+ PyArray_DotFunc *dotfunc;
+
+ /*
+ * Function to scan an ASCII file and
+ * place a single value plus possible separator
+ * Can be NULL
+ */
+ PyArray_ScanFunc *scanfunc;
+
+ /*
+ * Function to read a single value from a string
+ * and adjust the pointer; Can be NULL
+ */
+ PyArray_FromStrFunc *fromstr;
+
+ /*
+ * Function to determine if data is zero or not
+ * If NULL a default version is
+ * used at Registration time.
+ */
+ PyArray_NonzeroFunc *nonzero;
+
+ /*
+ * Used for arange. Should return 0 on success
+ * and -1 on failure.
+ * Can be NULL.
+ */
+ PyArray_FillFunc *fill;
+
+ /*
+ * Function to fill arrays with scalar values
+ * Can be NULL
+ */
+ PyArray_FillWithScalarFunc *fillwithscalar;
+
+ /*
+ * Sorting functions
+ * Can be NULL
+ */
+ PyArray_SortFunc *sort[NPY_NSORTS];
+ PyArray_ArgSortFunc *argsort[NPY_NSORTS];
+
+ /*
+ * Dictionary of additional casting functions
+ * PyArray_VectorUnaryFuncs
+ * which can be populated to support casting
+ * to other registered types. Can be NULL
+ */
+ PyObject *castdict;
+
+ /*
+ * Functions useful for generalizing
+ * the casting rules.
+ * Can be NULL;
+ */
+ PyArray_ScalarKindFunc *scalarkind;
+ int **cancastscalarkindto;
+ int *cancastto;
+
+ PyArray_FastClipFunc *fastclip;
+ PyArray_FastPutmaskFunc *fastputmask;
+ PyArray_FastTakeFunc *fasttake;
+
+ /*
+ * Function to select smallest
+ * Can be NULL
+ */
+ PyArray_ArgFunc *argmin;
+
+} PyArray_ArrFuncs;
+
+/* The item must be reference counted when it is inserted or extracted. */
+#define NPY_ITEM_REFCOUNT 0x01
+/* Same as needing REFCOUNT */
+#define NPY_ITEM_HASOBJECT 0x01
+/* Convert to list for pickling */
+#define NPY_LIST_PICKLE 0x02
+/* The item is a POINTER */
+#define NPY_ITEM_IS_POINTER 0x04
+/* memory needs to be initialized for this data-type */
+#define NPY_NEEDS_INIT 0x08
+/* operations need Python C-API so don't give-up thread. */
+#define NPY_NEEDS_PYAPI 0x10
+/* Use f.getitem when extracting elements of this data-type */
+#define NPY_USE_GETITEM 0x20
+/* Use f.setitem when creating a 0-d array from this data-type. */
+#define NPY_USE_SETITEM 0x40
+/* A sticky flag specifically for structured arrays */
+#define NPY_ALIGNED_STRUCT 0x80
+
+/*
+ * These are inherited for the global data-type if any data-types in the
+ * fields have them
+ */
+#define NPY_FROM_FIELDS (NPY_NEEDS_INIT | NPY_LIST_PICKLE | \
+ NPY_ITEM_REFCOUNT | NPY_NEEDS_PYAPI)
+
+#define NPY_OBJECT_DTYPE_FLAGS (NPY_LIST_PICKLE | NPY_USE_GETITEM | \
+ NPY_ITEM_IS_POINTER | NPY_ITEM_REFCOUNT | \
+ NPY_NEEDS_INIT | NPY_NEEDS_PYAPI)
+
+#define PyDataType_FLAGCHK(dtype, flag) \
+ (((dtype)->flags & (flag)) == (flag))
+
+#define PyDataType_REFCHK(dtype) \
+ PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT)
+
+typedef struct _PyArray_Descr {
+ PyObject_HEAD
+ /*
+ * the type object representing an
+ * instance of this type -- should not
+ * be two type_numbers with the same type
+ * object.
+ */
+ PyTypeObject *typeobj;
+ /* kind for this type */
+ char kind;
+ /* unique-character representing this type */
+ char type;
+ /*
+ * '>' (big), '<' (little), '|'
+ * (not-applicable), or '=' (native).
+ */
+ char byteorder;
+ /* flags describing data type */
+ char flags;
+ /* number representing this type */
+ int type_num;
+ /* element size (itemsize) for this type */
+ int elsize;
+ /* alignment needed for this type */
+ int alignment;
+ /*
+     * Non-NULL if this type is
+     * an array (C-contiguous)
+ * of some other type
+ */
+ struct _arr_descr *subarray;
+ /*
+ * The fields dictionary for this type
+ * For statically defined descr this
+ * is always Py_None
+ */
+ PyObject *fields;
+ /*
+ * An ordered tuple of field names or NULL
+ * if no fields are defined
+ */
+ PyObject *names;
+ /*
+ * a table of functions specific for each
+ * basic data descriptor
+ */
+ PyArray_ArrFuncs *f;
+ /* Metadata about this dtype */
+ PyObject *metadata;
+ /*
+ * Metadata specific to the C implementation
+ * of the particular dtype. This was added
+ * for NumPy 1.7.0.
+ */
+ NpyAuxData *c_metadata;
+ /* Cached hash value (-1 if not yet computed).
+ * This was added for NumPy 2.0.0.
+ */
+ npy_hash_t hash;
+} PyArray_Descr;
+
+typedef struct _arr_descr {
+ PyArray_Descr *base;
+ PyObject *shape; /* a tuple */
+} PyArray_ArrayDescr;
+
+/*
+ * The main array object structure.
+ *
+ * It has been recommended to use the inline functions defined below
+ * (PyArray_DATA and friends) to access fields here for a number of
+ * releases. Direct access to the members themselves is deprecated.
+ * To ensure that your code does not use deprecated access,
+ * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
+ * (or NPY_1_8_API_VERSION or higher as required).
+ */
+/* This struct will be moved to a private header in a future release */
+typedef struct tagPyArrayObject_fields {
+ PyObject_HEAD
+ /* Pointer to the raw data buffer */
+ char *data;
+ /* The number of dimensions, also called 'ndim' */
+ int nd;
+ /* The size in each dimension, also called 'shape' */
+ npy_intp *dimensions;
+ /*
+ * Number of bytes to jump to get to the
+ * next element in each dimension
+ */
+ npy_intp *strides;
+ /*
+ * This object is decref'd upon
+ * deletion of array. Except in the
+ * case of WRITEBACKIFCOPY which has
+ * special handling.
+ *
+ * For views it points to the original
+ * array, collapsed so no chains of
+ * views occur.
+ *
+ * For creation from buffer object it
+ * points to an object that should be
+ * decref'd on deletion
+ *
+ * For WRITEBACKIFCOPY flag this is an
+ * array to-be-updated upon calling
+ * PyArray_ResolveWritebackIfCopy
+ */
+ PyObject *base;
+ /* Pointer to type structure */
+ PyArray_Descr *descr;
+ /* Flags describing array -- see below */
+ int flags;
+ /* For weak references */
+ PyObject *weakreflist;
+} PyArrayObject_fields;
+
+/*
+ * To hide the implementation details, we only expose
+ * the Python struct HEAD.
+ */
+#if !defined(NPY_NO_DEPRECATED_API) || \
+ (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION)
+/*
+ * Can't put this in npy_deprecated_api.h like the others.
+ * PyArrayObject field access is deprecated as of NumPy 1.7.
+ */
+typedef PyArrayObject_fields PyArrayObject;
+#else
+typedef struct tagPyArrayObject {
+ PyObject_HEAD
+} PyArrayObject;
+#endif
+
+#define NPY_SIZEOF_PYARRAYOBJECT (sizeof(PyArrayObject_fields))
+
+/* Array Flags Object */
+typedef struct PyArrayFlagsObject {
+ PyObject_HEAD
+ PyObject *arr;
+ int flags;
+} PyArrayFlagsObject;
+
+/* Mirrors buffer object to ptr */
+
+typedef struct {
+ PyObject_HEAD
+ PyObject *base;
+ void *ptr;
+ npy_intp len;
+ int flags;
+} PyArray_Chunk;
+
+typedef struct {
+ NPY_DATETIMEUNIT base;
+ int num;
+} PyArray_DatetimeMetaData;
+
+typedef struct {
+ NpyAuxData base;
+ PyArray_DatetimeMetaData meta;
+} PyArray_DatetimeDTypeMetaData;
+
+/*
+ * This structure contains an exploded view of a date-time value.
+ * NaT is represented by year == NPY_DATETIME_NAT.
+ */
+typedef struct {
+ npy_int64 year;
+ npy_int32 month, day, hour, min, sec, us, ps, as;
+} npy_datetimestruct;
+
+/* This is not used internally. */
+typedef struct {
+ npy_int64 day;
+ npy_int32 sec, us, ps, as;
+} npy_timedeltastruct;
+
+typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *);
+
+/*
+ * Means c-style contiguous (last index varies the fastest). The data
+ * elements right after each other.
+ *
+ * This flag may be requested in constructor functions.
+ * This flag may be tested for in PyArray_FLAGS(arr).
+ */
+#define NPY_ARRAY_C_CONTIGUOUS 0x0001
+
+/*
+ * Set if array is a contiguous Fortran array: the first index varies
+ * the fastest in memory (strides array is reverse of C-contiguous
+ * array)
+ *
+ * This flag may be requested in constructor functions.
+ * This flag may be tested for in PyArray_FLAGS(arr).
+ */
+#define NPY_ARRAY_F_CONTIGUOUS 0x0002
+
+/*
+ * Note: all 0-d arrays are C_CONTIGUOUS and F_CONTIGUOUS. If a
+ * 1-d array is C_CONTIGUOUS it is also F_CONTIGUOUS. Arrays with
+ * more than one dimension can be C_CONTIGUOUS and F_CONTIGUOUS
+ * at the same time if they have either zero or one element.
+ * If NPY_RELAXED_STRIDES_CHECKING is set, a higher dimensional
+ * array is always C_CONTIGUOUS and F_CONTIGUOUS if it has zero elements
+ * and the array is contiguous if ndarray.squeeze() is contiguous.
+ * I.e. dimensions for which `ndarray.shape[dimension] == 1` are
+ * ignored.
+ */
+
+/*
+ * If set, the array owns the data: it will be free'd when the array
+ * is deleted.
+ *
+ * This flag may be tested for in PyArray_FLAGS(arr).
+ */
+#define NPY_ARRAY_OWNDATA 0x0004
+
+/*
+ * An array never has the next four set; they're only used as parameter
+ * flags to the various FromAny functions
+ *
+ * This flag may be requested in constructor functions.
+ */
+
+/* Cause a cast to occur regardless of whether or not it is safe. */
+#define NPY_ARRAY_FORCECAST 0x0010
+
+/*
+ * Always copy the array. Returned arrays are always CONTIGUOUS,
+ * ALIGNED, and WRITEABLE.
+ *
+ * This flag may be requested in constructor functions.
+ */
+#define NPY_ARRAY_ENSURECOPY 0x0020
+
+/*
+ * Make sure the returned array is a base-class ndarray
+ *
+ * This flag may be requested in constructor functions.
+ */
+#define NPY_ARRAY_ENSUREARRAY 0x0040
+
+/*
+ * Make sure that the strides are in units of the element size Needed
+ * for some operations with record-arrays.
+ *
+ * This flag may be requested in constructor functions.
+ */
+#define NPY_ARRAY_ELEMENTSTRIDES 0x0080
+
+/*
+ * Array data is aligned on the appropriate memory address for the type
+ * stored according to how the compiler would align things (e.g., an
+ * array of integers (4 bytes each) starts on a memory address that's
+ * a multiple of 4)
+ *
+ * This flag may be requested in constructor functions.
+ * This flag may be tested for in PyArray_FLAGS(arr).
+ */
+#define NPY_ARRAY_ALIGNED 0x0100
+
+/*
+ * Array data has the native endianness
+ *
+ * This flag may be requested in constructor functions.
+ */
+#define NPY_ARRAY_NOTSWAPPED 0x0200
+
+/*
+ * Array data is writeable
+ *
+ * This flag may be requested in constructor functions.
+ * This flag may be tested for in PyArray_FLAGS(arr).
+ */
+#define NPY_ARRAY_WRITEABLE 0x0400
+
+/*
+ * If this flag is set, then base contains a pointer to an array of
+ * the same size that should be updated with the current contents of
+ * this array when PyArray_ResolveWritebackIfCopy is called.
+ *
+ * This flag may be requested in constructor functions.
+ * This flag may be tested for in PyArray_FLAGS(arr).
+ */
+#define NPY_ARRAY_UPDATEIFCOPY 0x1000 /* Deprecated in 1.14 */
+#define NPY_ARRAY_WRITEBACKIFCOPY 0x2000
+
+/*
+ * NOTE: there are also internal flags defined in multiarray/arrayobject.h,
+ * which start at bit 31 and work down.
+ */
+
+#define NPY_ARRAY_BEHAVED (NPY_ARRAY_ALIGNED | \
+ NPY_ARRAY_WRITEABLE)
+#define NPY_ARRAY_BEHAVED_NS (NPY_ARRAY_ALIGNED | \
+ NPY_ARRAY_WRITEABLE | \
+ NPY_ARRAY_NOTSWAPPED)
+#define NPY_ARRAY_CARRAY (NPY_ARRAY_C_CONTIGUOUS | \
+ NPY_ARRAY_BEHAVED)
+#define NPY_ARRAY_CARRAY_RO (NPY_ARRAY_C_CONTIGUOUS | \
+ NPY_ARRAY_ALIGNED)
+#define NPY_ARRAY_FARRAY (NPY_ARRAY_F_CONTIGUOUS | \
+ NPY_ARRAY_BEHAVED)
+#define NPY_ARRAY_FARRAY_RO (NPY_ARRAY_F_CONTIGUOUS | \
+ NPY_ARRAY_ALIGNED)
+#define NPY_ARRAY_DEFAULT (NPY_ARRAY_CARRAY)
+#define NPY_ARRAY_IN_ARRAY (NPY_ARRAY_CARRAY_RO)
+#define NPY_ARRAY_OUT_ARRAY (NPY_ARRAY_CARRAY)
+#define NPY_ARRAY_INOUT_ARRAY (NPY_ARRAY_CARRAY | \
+ NPY_ARRAY_UPDATEIFCOPY)
+#define NPY_ARRAY_INOUT_ARRAY2 (NPY_ARRAY_CARRAY | \
+ NPY_ARRAY_WRITEBACKIFCOPY)
+#define NPY_ARRAY_IN_FARRAY (NPY_ARRAY_FARRAY_RO)
+#define NPY_ARRAY_OUT_FARRAY (NPY_ARRAY_FARRAY)
+#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY | \
+ NPY_ARRAY_UPDATEIFCOPY)
+#define NPY_ARRAY_INOUT_FARRAY2 (NPY_ARRAY_FARRAY | \
+ NPY_ARRAY_WRITEBACKIFCOPY)
+
+#define NPY_ARRAY_UPDATE_ALL (NPY_ARRAY_C_CONTIGUOUS | \
+ NPY_ARRAY_F_CONTIGUOUS | \
+ NPY_ARRAY_ALIGNED)
+
+/* This flag is for the array interface, not PyArrayObject */
+#define NPY_ARR_HAS_DESCR 0x0800
+
+
+
+
+/*
+ * Size of internal buffers used for alignment. Make BUFSIZE a multiple
+ * of sizeof(npy_cdouble) -- usually 16 -- so that ufunc buffers are aligned.
+ */
+#define NPY_MIN_BUFSIZE ((int)sizeof(npy_cdouble))
+#define NPY_MAX_BUFSIZE (((int)sizeof(npy_cdouble))*1000000)
+#define NPY_BUFSIZE 8192
+/* buffer stress test size: */
+/*#define NPY_BUFSIZE 17*/
+
+#define PyArray_MAX(a,b) (((a)>(b))?(a):(b))
+#define PyArray_MIN(a,b) (((a)<(b))?(a):(b))
+#define PyArray_CLT(p,q) ((((p).real==(q).real) ? ((p).imag < (q).imag) : \
+ ((p).real < (q).real)))
+#define PyArray_CGT(p,q) ((((p).real==(q).real) ? ((p).imag > (q).imag) : \
+ ((p).real > (q).real)))
+#define PyArray_CLE(p,q) ((((p).real==(q).real) ? ((p).imag <= (q).imag) : \
+ ((p).real <= (q).real)))
+#define PyArray_CGE(p,q) ((((p).real==(q).real) ? ((p).imag >= (q).imag) : \
+ ((p).real >= (q).real)))
+#define PyArray_CEQ(p,q) (((p).real==(q).real) && ((p).imag == (q).imag))
+#define PyArray_CNE(p,q) (((p).real!=(q).real) || ((p).imag != (q).imag))
+
+/*
+ * C API: consists of Macros and functions. The MACROS are defined
+ * here.
+ */
+
+
+#define PyArray_ISCONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS)
+#define PyArray_ISWRITEABLE(m) PyArray_CHKFLAGS(m, NPY_ARRAY_WRITEABLE)
+#define PyArray_ISALIGNED(m) PyArray_CHKFLAGS(m, NPY_ARRAY_ALIGNED)
+
+#define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS)
+#define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS)
+
+/* the variable is used in some places, so always define it */
+#define NPY_BEGIN_THREADS_DEF PyThreadState *_save=NULL;
+#if NPY_ALLOW_THREADS
+#define NPY_BEGIN_ALLOW_THREADS Py_BEGIN_ALLOW_THREADS
+#define NPY_END_ALLOW_THREADS Py_END_ALLOW_THREADS
+#define NPY_BEGIN_THREADS do {_save = PyEval_SaveThread();} while (0);
+#define NPY_END_THREADS do { if (_save) \
+ { PyEval_RestoreThread(_save); _save = NULL;} } while (0);
+#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) do { if (loop_size > 500) \
+ { _save = PyEval_SaveThread();} } while (0);
+
+#define NPY_BEGIN_THREADS_DESCR(dtype) \
+ do {if (!(PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI))) \
+ NPY_BEGIN_THREADS;} while (0);
+
+#define NPY_END_THREADS_DESCR(dtype) \
+ do {if (!(PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI))) \
+ NPY_END_THREADS; } while (0);
+
+#define NPY_ALLOW_C_API_DEF PyGILState_STATE __save__;
+#define NPY_ALLOW_C_API do {__save__ = PyGILState_Ensure();} while (0);
+#define NPY_DISABLE_C_API do {PyGILState_Release(__save__);} while (0);
+#else
+#define NPY_BEGIN_ALLOW_THREADS
+#define NPY_END_ALLOW_THREADS
+#define NPY_BEGIN_THREADS
+#define NPY_END_THREADS
+#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size)
+#define NPY_BEGIN_THREADS_DESCR(dtype)
+#define NPY_END_THREADS_DESCR(dtype)
+#define NPY_ALLOW_C_API_DEF
+#define NPY_ALLOW_C_API
+#define NPY_DISABLE_C_API
+#endif
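+
+/*
+ * Editorial usage sketch -- the usual pattern for releasing the GIL
+ * around a loop that makes no Python C-API calls:
+ *
+ *     NPY_BEGIN_THREADS_DEF;
+ *     NPY_BEGIN_THREADS_DESCR(dtype);
+ *     ... tight numeric loop, no CPython calls ...
+ *     NPY_END_THREADS_DESCR(dtype);
+ */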
+
+/**********************************
+ * The nditer object, added in 1.6
+ **********************************/
+
+/* The actual structure of the iterator is an internal detail */
+typedef struct NpyIter_InternalOnly NpyIter;
+
+/* Iterator function pointers that may be specialized */
+typedef int (NpyIter_IterNextFunc)(NpyIter *iter);
+typedef void (NpyIter_GetMultiIndexFunc)(NpyIter *iter,
+ npy_intp *outcoords);
+
+/*** Global flags that may be passed to the iterator constructors ***/
+
+/* Track an index representing C order */
+#define NPY_ITER_C_INDEX 0x00000001
+/* Track an index representing Fortran order */
+#define NPY_ITER_F_INDEX 0x00000002
+/* Track a multi-index */
+#define NPY_ITER_MULTI_INDEX 0x00000004
+/* User code external to the iterator does the 1-dimensional innermost loop */
+#define NPY_ITER_EXTERNAL_LOOP 0x00000008
+/* Convert all the operands to a common data type */
+#define NPY_ITER_COMMON_DTYPE 0x00000010
+/* Operands may hold references, requiring API access during iteration */
+#define NPY_ITER_REFS_OK 0x00000020
+/* Zero-sized operands should be permitted, iteration checks IterSize for 0 */
+#define NPY_ITER_ZEROSIZE_OK 0x00000040
+/* Permits reductions (size-0 stride with dimension size > 1) */
+#define NPY_ITER_REDUCE_OK 0x00000080
+/* Enables sub-range iteration */
+#define NPY_ITER_RANGED 0x00000100
+/* Enables buffering */
+#define NPY_ITER_BUFFERED 0x00000200
+/* When buffering is enabled, grows the inner loop if possible */
+#define NPY_ITER_GROWINNER 0x00000400
+/* Delay allocation of buffers until first Reset* call */
+#define NPY_ITER_DELAY_BUFALLOC 0x00000800
+/* When NPY_KEEPORDER is specified, disable reversing negative-stride axes */
+#define NPY_ITER_DONT_NEGATE_STRIDES 0x00001000
+/*
+ * If output operands overlap with other operands (based on heuristics that
+ * has false positives but no false negatives), make temporary copies to
+ * eliminate overlap.
+ */
+#define NPY_ITER_COPY_IF_OVERLAP 0x00002000
+
+/*** Per-operand flags that may be passed to the iterator constructors ***/
+
+/* The operand will be read from and written to */
+#define NPY_ITER_READWRITE 0x00010000
+/* The operand will only be read from */
+#define NPY_ITER_READONLY 0x00020000
+/* The operand will only be written to */
+#define NPY_ITER_WRITEONLY 0x00040000
+/* The operand's data must be in native byte order */
+#define NPY_ITER_NBO 0x00080000
+/* The operand's data must be aligned */
+#define NPY_ITER_ALIGNED 0x00100000
+/* The operand's data must be contiguous (within the inner loop) */
+#define NPY_ITER_CONTIG 0x00200000
+/* The operand may be copied to satisfy requirements */
+#define NPY_ITER_COPY 0x00400000
+/* The operand may be copied with WRITEBACKIFCOPY to satisfy requirements */
+#define NPY_ITER_UPDATEIFCOPY 0x00800000
+/* Allocate the operand if it is NULL */
+#define NPY_ITER_ALLOCATE 0x01000000
+/* If an operand is allocated, don't use any subtype */
+#define NPY_ITER_NO_SUBTYPE 0x02000000
+/* This is a virtual array slot, operand is NULL but temporary data is there */
+#define NPY_ITER_VIRTUAL 0x04000000
+/* Require that the dimension match the iterator dimensions exactly */
+#define NPY_ITER_NO_BROADCAST 0x08000000
+/* A mask is being used on this array, affects buffer -> array copy */
+#define NPY_ITER_WRITEMASKED 0x10000000
+/* This array is the mask for all WRITEMASKED operands */
+#define NPY_ITER_ARRAYMASK 0x20000000
+/* Assume iterator order data access for COPY_IF_OVERLAP */
+#define NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE 0x40000000
+
+#define NPY_ITER_GLOBAL_FLAGS 0x0000ffff
+#define NPY_ITER_PER_OP_FLAGS 0xffff0000
+
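+/*
+ * A minimal sketch of how these flags combine, assuming the NpyIter_New,
+ * NpyIter_GetIterNext and NpyIter_Deallocate functions from the iterator
+ * C-API (declared with the rest of the API), and a hypothetical helper
+ * name: a buffered, external-loop pass over one read-only operand.
+ */
+#if 0   /* illustrative sketch */
+static int
+npy_example_visit(PyArrayObject *arr)
+{
+    NpyIter *iter = NpyIter_New(arr,
+                                NPY_ITER_READONLY | NPY_ITER_EXTERNAL_LOOP |
+                                NPY_ITER_BUFFERED,
+                                NPY_KEEPORDER, NPY_NO_CASTING, NULL);
+    NpyIter_IterNextFunc *iternext;
+    char **dataptr;
+    npy_intp *strideptr, *sizeptr;
+
+    if (iter == NULL) {
+        return -1;
+    }
+    iternext = NpyIter_GetIterNext(iter, NULL);
+    dataptr = NpyIter_GetDataPtrArray(iter);
+    strideptr = NpyIter_GetInnerStrideArray(iter);
+    sizeptr = NpyIter_GetInnerLoopSizePtr(iter);
+    do {
+        /* user code runs the one-dimensional innermost loop itself */
+        char *data = *dataptr;
+        npy_intp stride = *strideptr, count = *sizeptr;
+        while (count--) {
+            /* consume one element at data */
+            data += stride;
+        }
+    } while (iternext(iter));
+    NpyIter_Deallocate(iter);
+    return 0;
+}
+#endif
+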
+
+/*****************************
+ * Basic iterator object
+ *****************************/
+
+/* FWD declaration */
+typedef struct PyArrayIterObject_tag PyArrayIterObject;
+
+/*
+ * type of the function which translates a set of coordinates to a
+ * pointer to the data
+ */
+typedef char* (*npy_iter_get_dataptr_t)(PyArrayIterObject* iter, npy_intp*);
+
+struct PyArrayIterObject_tag {
+ PyObject_HEAD
+ int nd_m1; /* number of dimensions - 1 */
+ npy_intp index, size;
+ npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */
+ npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */
+ npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */
+ npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */
+ npy_intp factors[NPY_MAXDIMS]; /* shape factors */
+ PyArrayObject *ao;
+ char *dataptr; /* pointer to current item*/
+ npy_bool contiguous;
+
+ npy_intp bounds[NPY_MAXDIMS][2];
+ npy_intp limits[NPY_MAXDIMS][2];
+ npy_intp limits_sizes[NPY_MAXDIMS];
+ npy_iter_get_dataptr_t translate;
+};
+
+
+/* Iterator API */
+#define PyArrayIter_Check(op) PyObject_TypeCheck(op, &PyArrayIter_Type)
+
+#define _PyAIT(it) ((PyArrayIterObject *)(it))
+#define PyArray_ITER_RESET(it) do { \
+ _PyAIT(it)->index = 0; \
+ _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \
+ memset(_PyAIT(it)->coordinates, 0, \
+ (_PyAIT(it)->nd_m1+1)*sizeof(npy_intp)); \
+} while (0)
+
+#define _PyArray_ITER_NEXT1(it) do { \
+ (it)->dataptr += _PyAIT(it)->strides[0]; \
+ (it)->coordinates[0]++; \
+} while (0)
+
+#define _PyArray_ITER_NEXT2(it) do { \
+ if ((it)->coordinates[1] < (it)->dims_m1[1]) { \
+ (it)->coordinates[1]++; \
+ (it)->dataptr += (it)->strides[1]; \
+ } \
+ else { \
+ (it)->coordinates[1] = 0; \
+ (it)->coordinates[0]++; \
+ (it)->dataptr += (it)->strides[0] - \
+ (it)->backstrides[1]; \
+ } \
+} while (0)
+
+#define PyArray_ITER_NEXT(it) do { \
+ _PyAIT(it)->index++; \
+ if (_PyAIT(it)->nd_m1 == 0) { \
+ _PyArray_ITER_NEXT1(_PyAIT(it)); \
+ } \
+ else if (_PyAIT(it)->contiguous) \
+ _PyAIT(it)->dataptr += PyArray_DESCR(_PyAIT(it)->ao)->elsize; \
+ else if (_PyAIT(it)->nd_m1 == 1) { \
+ _PyArray_ITER_NEXT2(_PyAIT(it)); \
+ } \
+ else { \
+ int __npy_i; \
+ for (__npy_i=_PyAIT(it)->nd_m1; __npy_i >= 0; __npy_i--) { \
+ if (_PyAIT(it)->coordinates[__npy_i] < \
+ _PyAIT(it)->dims_m1[__npy_i]) { \
+ _PyAIT(it)->coordinates[__npy_i]++; \
+ _PyAIT(it)->dataptr += \
+ _PyAIT(it)->strides[__npy_i]; \
+ break; \
+ } \
+ else { \
+ _PyAIT(it)->coordinates[__npy_i] = 0; \
+ _PyAIT(it)->dataptr -= \
+ _PyAIT(it)->backstrides[__npy_i]; \
+ } \
+ } \
+ } \
+} while (0)
+
+#define PyArray_ITER_GOTO(it, destination) do { \
+ int __npy_i; \
+ _PyAIT(it)->index = 0; \
+ _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \
+ for (__npy_i = _PyAIT(it)->nd_m1; __npy_i>=0; __npy_i--) { \
+ if (destination[__npy_i] < 0) { \
+ destination[__npy_i] += \
+ _PyAIT(it)->dims_m1[__npy_i]+1; \
+ } \
+ _PyAIT(it)->dataptr += destination[__npy_i] * \
+ _PyAIT(it)->strides[__npy_i]; \
+ _PyAIT(it)->coordinates[__npy_i] = \
+ destination[__npy_i]; \
+ _PyAIT(it)->index += destination[__npy_i] * \
+ ( __npy_i==_PyAIT(it)->nd_m1 ? 1 : \
+ _PyAIT(it)->dims_m1[__npy_i+1]+1) ; \
+ } \
+} while (0)
+
+#define PyArray_ITER_GOTO1D(it, ind) do { \
+ int __npy_i; \
+ npy_intp __npy_ind = (npy_intp) (ind); \
+ if (__npy_ind < 0) __npy_ind += _PyAIT(it)->size; \
+ _PyAIT(it)->index = __npy_ind; \
+ if (_PyAIT(it)->nd_m1 == 0) { \
+ _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \
+ __npy_ind * _PyAIT(it)->strides[0]; \
+ } \
+ else if (_PyAIT(it)->contiguous) \
+ _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \
+ __npy_ind * PyArray_DESCR(_PyAIT(it)->ao)->elsize; \
+ else { \
+ _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \
+ for (__npy_i = 0; __npy_i<=_PyAIT(it)->nd_m1; \
+ __npy_i++) { \
+ _PyAIT(it)->dataptr += \
+ (__npy_ind / _PyAIT(it)->factors[__npy_i]) \
+ * _PyAIT(it)->strides[__npy_i]; \
+ __npy_ind %= _PyAIT(it)->factors[__npy_i]; \
+ } \
+ } \
+} while (0)
+
+#define PyArray_ITER_DATA(it) ((void *)(_PyAIT(it)->dataptr))
+
+#define PyArray_ITER_NOTDONE(it) (_PyAIT(it)->index < _PyAIT(it)->size)
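+
+/*
+ * A minimal sketch of flat iteration with this API, assuming PyArray_IterNew
+ * from the array C-API and a hypothetical helper name: visit every element
+ * of an array in C order, whatever its strides.
+ */
+#if 0   /* illustrative sketch */
+static void
+npy_example_flat(PyArrayObject *arr)
+{
+    PyArrayIterObject *it =
+        (PyArrayIterObject *)PyArray_IterNew((PyObject *)arr);
+    if (it == NULL) {
+        return;
+    }
+    while (PyArray_ITER_NOTDONE(it)) {
+        void *item = PyArray_ITER_DATA(it);
+        /* process the element at item */
+        (void)item;
+        PyArray_ITER_NEXT(it);
+    }
+    Py_DECREF(it);
+}
+#endif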
+
+
+/*
+ * Any object passed to PyArray_Broadcast must be binary compatible
+ * with this structure.
+ */
+
+typedef struct {
+ PyObject_HEAD
+ int numiter; /* number of iters */
+ npy_intp size; /* broadcasted size */
+ npy_intp index; /* current index */
+ int nd; /* number of dims */
+ npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */
+ PyArrayIterObject *iters[NPY_MAXARGS]; /* iterators */
+} PyArrayMultiIterObject;
+
+#define _PyMIT(m) ((PyArrayMultiIterObject *)(m))
+#define PyArray_MultiIter_RESET(multi) do { \
+ int __npy_mi; \
+ _PyMIT(multi)->index = 0; \
+ for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \
+ PyArray_ITER_RESET(_PyMIT(multi)->iters[__npy_mi]); \
+ } \
+} while (0)
+
+#define PyArray_MultiIter_NEXT(multi) do { \
+ int __npy_mi; \
+ _PyMIT(multi)->index++; \
+ for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \
+ PyArray_ITER_NEXT(_PyMIT(multi)->iters[__npy_mi]); \
+ } \
+} while (0)
+
+#define PyArray_MultiIter_GOTO(multi, dest) do { \
+ int __npy_mi; \
+ for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \
+ PyArray_ITER_GOTO(_PyMIT(multi)->iters[__npy_mi], dest); \
+ } \
+ _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \
+} while (0)
+
+#define PyArray_MultiIter_GOTO1D(multi, ind) do { \
+ int __npy_mi; \
+ for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \
+ PyArray_ITER_GOTO1D(_PyMIT(multi)->iters[__npy_mi], ind); \
+ } \
+ _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \
+} while (0)
+
+#define PyArray_MultiIter_DATA(multi, i) \
+ ((void *)(_PyMIT(multi)->iters[i]->dataptr))
+
+#define PyArray_MultiIter_NEXTi(multi, i) \
+ PyArray_ITER_NEXT(_PyMIT(multi)->iters[i])
+
+#define PyArray_MultiIter_NOTDONE(multi) \
+ (_PyMIT(multi)->index < _PyMIT(multi)->size)
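+
+/*
+ * A minimal broadcasting sketch, assuming PyArray_MultiIterNew from the
+ * array C-API, double-typed operands, and a hypothetical helper name: step
+ * two operands in lock-step over their broadcast shape.
+ */
+#if 0   /* illustrative sketch */
+static void
+npy_example_broadcast(PyObject *a, PyObject *b)
+{
+    PyArrayMultiIterObject *multi =
+        (PyArrayMultiIterObject *)PyArray_MultiIterNew(2, a, b);
+    if (multi == NULL) {
+        return;
+    }
+    while (PyArray_MultiIter_NOTDONE(multi)) {
+        double x = *(double *)PyArray_MultiIter_DATA(multi, 0);
+        double y = *(double *)PyArray_MultiIter_DATA(multi, 1);
+        /* combine x and y elementwise */
+        (void)x; (void)y;
+        PyArray_MultiIter_NEXT(multi);
+    }
+    Py_DECREF(multi);
+}
+#endif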
+
+
+/*
+ * Store the information needed for fancy-indexing over an array. The
+ * fields are slightly unordered to keep consec, dataptr and subspace
+ * where they were originally.
+ */
+typedef struct {
+ PyObject_HEAD
+ /*
+ * Multi-iterator portion --- needs to be present in this
+ * order to work with PyArray_Broadcast
+ */
+
+ int numiter; /* number of index-array
+ iterators */
+ npy_intp size; /* size of broadcasted
+ result */
+ npy_intp index; /* current index */
+ int nd; /* number of dims */
+ npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */
+ NpyIter *outer; /* index objects
+ iterator */
+ void *unused[NPY_MAXDIMS - 2];
+ PyArrayObject *array;
+    /* Flat iterator for the indexed array. Kept solely for compatibility. */
+ PyArrayIterObject *ait;
+
+ /*
+ * Subspace array. For binary compatibility (was an iterator,
+ * but only the check for NULL should be used).
+ */
+ PyArrayObject *subspace;
+
+ /*
+ * if subspace iteration, then this is the array of axes in
+ * the underlying array represented by the index objects
+ */
+ int iteraxes[NPY_MAXDIMS];
+ npy_intp fancy_strides[NPY_MAXDIMS];
+
+ /* pointer when all fancy indices are 0 */
+ char *baseoffset;
+
+ /*
+ * after binding consec denotes at which axis the fancy axes
+ * are inserted.
+ */
+ int consec;
+ char *dataptr;
+
+ int nd_fancy;
+ npy_intp fancy_dims[NPY_MAXDIMS];
+
+ /* Whether the iterator (any of the iterators) requires API */
+ int needs_api;
+
+ /*
+ * Extra op information.
+ */
+ PyArrayObject *extra_op;
+ PyArray_Descr *extra_op_dtype; /* desired dtype */
+ npy_uint32 *extra_op_flags; /* Iterator flags */
+
+ NpyIter *extra_op_iter;
+ NpyIter_IterNextFunc *extra_op_next;
+ char **extra_op_ptrs;
+
+ /*
+ * Information about the iteration state.
+ */
+ NpyIter_IterNextFunc *outer_next;
+ char **outer_ptrs;
+ npy_intp *outer_strides;
+
+ /*
+ * Information about the subspace iterator.
+ */
+ NpyIter *subspace_iter;
+ NpyIter_IterNextFunc *subspace_next;
+ char **subspace_ptrs;
+ npy_intp *subspace_strides;
+
+    /* Count for the external loop (whichever it is) for API iteration */
+ npy_intp iter_count;
+
+} PyArrayMapIterObject;
+
+enum {
+ NPY_NEIGHBORHOOD_ITER_ZERO_PADDING,
+ NPY_NEIGHBORHOOD_ITER_ONE_PADDING,
+ NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING,
+ NPY_NEIGHBORHOOD_ITER_CIRCULAR_PADDING,
+ NPY_NEIGHBORHOOD_ITER_MIRROR_PADDING
+};
+
+typedef struct {
+ PyObject_HEAD
+
+ /*
+ * PyArrayIterObject part: keep this in this exact order
+ */
+ int nd_m1; /* number of dimensions - 1 */
+ npy_intp index, size;
+ npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */
+ npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */
+ npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */
+ npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */
+ npy_intp factors[NPY_MAXDIMS]; /* shape factors */
+ PyArrayObject *ao;
+ char *dataptr; /* pointer to current item*/
+ npy_bool contiguous;
+
+ npy_intp bounds[NPY_MAXDIMS][2];
+ npy_intp limits[NPY_MAXDIMS][2];
+ npy_intp limits_sizes[NPY_MAXDIMS];
+ npy_iter_get_dataptr_t translate;
+
+ /*
+ * New members
+ */
+ npy_intp nd;
+
+    /* dimensions holds the shape of the array */
+ npy_intp dimensions[NPY_MAXDIMS];
+
+ /*
+ * Neighborhood points coordinates are computed relatively to the
+ * point pointed by _internal_iter
+ */
+ PyArrayIterObject* _internal_iter;
+ /*
+ * To keep a reference to the representation of the constant value
+ * for constant padding
+ */
+ char* constant;
+
+ int mode;
+} PyArrayNeighborhoodIterObject;
+
+/*
+ * Neighborhood iterator API
+ */
+
+/* General: those work for any mode */
+static NPY_INLINE int
+PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter);
+static NPY_INLINE int
+PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter);
+#if 0
+static NPY_INLINE int
+PyArrayNeighborhoodIter_Next2D(PyArrayNeighborhoodIterObject* iter);
+#endif
+
+/*
+ * Include inline implementations - functions defined there are not
+ * considered public API
+ */
+#define _NPY_INCLUDE_NEIGHBORHOOD_IMP
+#include "_neighborhood_iterator_imp.h"
+#undef _NPY_INCLUDE_NEIGHBORHOOD_IMP
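+
+/*
+ * A minimal neighborhood sketch, assuming PyArray_NeighborhoodIterNew from
+ * the array C-API, a 1-d base iterator, and a hypothetical helper name:
+ * visit a [-1, 1] window around the current point, zero-padded past the
+ * edges.
+ */
+#if 0   /* illustrative sketch */
+static void
+npy_example_neighborhood(PyArrayIterObject *it)
+{
+    npy_intp bounds[2] = {-1, 1};
+    npy_intp i;
+    PyArrayNeighborhoodIterObject *nit = (PyArrayNeighborhoodIterObject *)
+        PyArray_NeighborhoodIterNew(it, bounds,
+                                    NPY_NEIGHBORHOOD_ITER_ZERO_PADDING, NULL);
+    if (nit == NULL) {
+        return;
+    }
+    PyArrayNeighborhoodIter_Reset(nit);
+    for (i = 0; i < nit->size; i++) {
+        /* nit->dataptr points at the current neighbor */
+        PyArrayNeighborhoodIter_Next(nit);
+    }
+    Py_DECREF(nit);
+}
+#endif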
+
+/* The default array type */
+#define NPY_DEFAULT_TYPE NPY_DOUBLE
+
+/*
+ * All sorts of useful ways to look into a PyArrayObject. It is recommended
+ * to use PyArrayObject * objects instead of always casting from PyObject *,
+ * for improved type checking.
+ *
+ * In many cases here the macro versions of the accessors are deprecated,
+ * but can't be immediately changed to inline functions because the
+ * preexisting macros accept PyObject * and do automatic casts. Inline
+ * functions accepting PyArrayObject * provide some compile-time
+ * checking of correctness when working with these objects in C.
+ */
+
+#define PyArray_ISONESEGMENT(m) (PyArray_NDIM(m) == 0 || \
+ PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) || \
+ PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS))
+
+#define PyArray_ISFORTRAN(m) (PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) && \
+ (!PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS)))
+
+#define PyArray_FORTRAN_IF(m) ((PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) ? \
+ NPY_ARRAY_F_CONTIGUOUS : 0))
+
+#if (defined(NPY_NO_DEPRECATED_API) && (NPY_1_7_API_VERSION <= NPY_NO_DEPRECATED_API))
+/*
+ * Changing access macros into functions, to allow for future hiding
+ * of the internal memory layout. This later hiding will allow the 2.x series
+ * to change the internal representation of arrays without affecting
+ * ABI compatibility.
+ */
+
+static NPY_INLINE int
+PyArray_NDIM(const PyArrayObject *arr)
+{
+ return ((PyArrayObject_fields *)arr)->nd;
+}
+
+static NPY_INLINE void *
+PyArray_DATA(PyArrayObject *arr)
+{
+ return ((PyArrayObject_fields *)arr)->data;
+}
+
+static NPY_INLINE char *
+PyArray_BYTES(PyArrayObject *arr)
+{
+ return ((PyArrayObject_fields *)arr)->data;
+}
+
+static NPY_INLINE npy_intp *
+PyArray_DIMS(PyArrayObject *arr)
+{
+ return ((PyArrayObject_fields *)arr)->dimensions;
+}
+
+static NPY_INLINE npy_intp *
+PyArray_STRIDES(PyArrayObject *arr)
+{
+ return ((PyArrayObject_fields *)arr)->strides;
+}
+
+static NPY_INLINE npy_intp
+PyArray_DIM(const PyArrayObject *arr, int idim)
+{
+ return ((PyArrayObject_fields *)arr)->dimensions[idim];
+}
+
+static NPY_INLINE npy_intp
+PyArray_STRIDE(const PyArrayObject *arr, int istride)
+{
+ return ((PyArrayObject_fields *)arr)->strides[istride];
+}
+
+static NPY_INLINE NPY_RETURNS_BORROWED_REF PyObject *
+PyArray_BASE(PyArrayObject *arr)
+{
+ return ((PyArrayObject_fields *)arr)->base;
+}
+
+static NPY_INLINE NPY_RETURNS_BORROWED_REF PyArray_Descr *
+PyArray_DESCR(PyArrayObject *arr)
+{
+ return ((PyArrayObject_fields *)arr)->descr;
+}
+
+static NPY_INLINE int
+PyArray_FLAGS(const PyArrayObject *arr)
+{
+ return ((PyArrayObject_fields *)arr)->flags;
+}
+
+static NPY_INLINE npy_intp
+PyArray_ITEMSIZE(const PyArrayObject *arr)
+{
+ return ((PyArrayObject_fields *)arr)->descr->elsize;
+}
+
+static NPY_INLINE int
+PyArray_TYPE(const PyArrayObject *arr)
+{
+ return ((PyArrayObject_fields *)arr)->descr->type_num;
+}
+
+static NPY_INLINE int
+PyArray_CHKFLAGS(const PyArrayObject *arr, int flags)
+{
+ return (PyArray_FLAGS(arr) & flags) == flags;
+}
+
+static NPY_INLINE PyObject *
+PyArray_GETITEM(const PyArrayObject *arr, const char *itemptr)
+{
+ return ((PyArrayObject_fields *)arr)->descr->f->getitem(
+ (void *)itemptr, (PyArrayObject *)arr);
+}
+
+static NPY_INLINE int
+PyArray_SETITEM(PyArrayObject *arr, char *itemptr, PyObject *v)
+{
+ return ((PyArrayObject_fields *)arr)->descr->f->setitem(
+ v, itemptr, arr);
+}
+
+#else
+
+/* These macros are deprecated as of NumPy 1.7. */
+#define PyArray_NDIM(obj) (((PyArrayObject_fields *)(obj))->nd)
+#define PyArray_BYTES(obj) (((PyArrayObject_fields *)(obj))->data)
+#define PyArray_DATA(obj) ((void *)((PyArrayObject_fields *)(obj))->data)
+#define PyArray_DIMS(obj) (((PyArrayObject_fields *)(obj))->dimensions)
+#define PyArray_STRIDES(obj) (((PyArrayObject_fields *)(obj))->strides)
+#define PyArray_DIM(obj,n) (PyArray_DIMS(obj)[n])
+#define PyArray_STRIDE(obj,n) (PyArray_STRIDES(obj)[n])
+#define PyArray_BASE(obj) (((PyArrayObject_fields *)(obj))->base)
+#define PyArray_DESCR(obj) (((PyArrayObject_fields *)(obj))->descr)
+#define PyArray_FLAGS(obj) (((PyArrayObject_fields *)(obj))->flags)
+#define PyArray_CHKFLAGS(m, FLAGS) \
+ ((((PyArrayObject_fields *)(m))->flags & (FLAGS)) == (FLAGS))
+#define PyArray_ITEMSIZE(obj) \
+ (((PyArrayObject_fields *)(obj))->descr->elsize)
+#define PyArray_TYPE(obj) \
+ (((PyArrayObject_fields *)(obj))->descr->type_num)
+#define PyArray_GETITEM(obj,itemptr) \
+ PyArray_DESCR(obj)->f->getitem((char *)(itemptr), \
+ (PyArrayObject *)(obj))
+
+#define PyArray_SETITEM(obj,itemptr,v) \
+ PyArray_DESCR(obj)->f->setitem((PyObject *)(v), \
+ (char *)(itemptr), \
+ (PyArrayObject *)(obj))
+#endif
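+
+/*
+ * A minimal sketch of the function-style accessors above (the helper name
+ * is hypothetical): with NPY_NO_DEPRECATED_API defined as
+ * NPY_1_7_API_VERSION before the headers are included, extension code reads
+ * array metadata through these inline functions instead of reaching into
+ * PyArrayObject_fields directly.
+ */
+#if 0   /* illustrative sketch */
+static npy_intp
+npy_example_nbytes(PyArrayObject *arr)
+{
+    npy_intp total = PyArray_ITEMSIZE(arr);
+    int i;
+    for (i = 0; i < PyArray_NDIM(arr); i++) {
+        total *= PyArray_DIM(arr, i);
+    }
+    return total;    /* itemsize times the product of the dimensions */
+}
+#endif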
+
+static NPY_INLINE PyArray_Descr *
+PyArray_DTYPE(PyArrayObject *arr)
+{
+ return ((PyArrayObject_fields *)arr)->descr;
+}
+
+static NPY_INLINE npy_intp *
+PyArray_SHAPE(PyArrayObject *arr)
+{
+ return ((PyArrayObject_fields *)arr)->dimensions;
+}
+
+/*
+ * Enables the specified array flags. Does no checking,
+ * assumes you know what you're doing.
+ */
+static NPY_INLINE void
+PyArray_ENABLEFLAGS(PyArrayObject *arr, int flags)
+{
+ ((PyArrayObject_fields *)arr)->flags |= flags;
+}
+
+/*
+ * Clears the specified array flags. Does no checking,
+ * assumes you know what you're doing.
+ */
+static NPY_INLINE void
+PyArray_CLEARFLAGS(PyArrayObject *arr, int flags)
+{
+ ((PyArrayObject_fields *)arr)->flags &= ~flags;
+}
+
+#define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL)
+
+#define PyTypeNum_ISUNSIGNED(type) (((type) == NPY_UBYTE) || \
+ ((type) == NPY_USHORT) || \
+ ((type) == NPY_UINT) || \
+ ((type) == NPY_ULONG) || \
+ ((type) == NPY_ULONGLONG))
+
+#define PyTypeNum_ISSIGNED(type) (((type) == NPY_BYTE) || \
+ ((type) == NPY_SHORT) || \
+ ((type) == NPY_INT) || \
+ ((type) == NPY_LONG) || \
+ ((type) == NPY_LONGLONG))
+
+#define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) && \
+ ((type) <= NPY_ULONGLONG))
+
+#define PyTypeNum_ISFLOAT(type) ((((type) >= NPY_FLOAT) && \
+ ((type) <= NPY_LONGDOUBLE)) || \
+ ((type) == NPY_HALF))
+
+#define PyTypeNum_ISNUMBER(type) (((type) <= NPY_CLONGDOUBLE) || \
+ ((type) == NPY_HALF))
+
+#define PyTypeNum_ISSTRING(type) (((type) == NPY_STRING) || \
+ ((type) == NPY_UNICODE))
+
+#define PyTypeNum_ISCOMPLEX(type) (((type) >= NPY_CFLOAT) && \
+ ((type) <= NPY_CLONGDOUBLE))
+
+#define PyTypeNum_ISPYTHON(type) (((type) == NPY_LONG) || \
+ ((type) == NPY_DOUBLE) || \
+ ((type) == NPY_CDOUBLE) || \
+ ((type) == NPY_BOOL) || \
+ ((type) == NPY_OBJECT ))
+
+#define PyTypeNum_ISFLEXIBLE(type) (((type) >=NPY_STRING) && \
+ ((type) <=NPY_VOID))
+
+#define PyTypeNum_ISDATETIME(type) (((type) >=NPY_DATETIME) && \
+ ((type) <=NPY_TIMEDELTA))
+
+#define PyTypeNum_ISUSERDEF(type) (((type) >= NPY_USERDEF) && \
+ ((type) < NPY_USERDEF+ \
+ NPY_NUMUSERTYPES))
+
+#define PyTypeNum_ISEXTENDED(type) (PyTypeNum_ISFLEXIBLE(type) || \
+ PyTypeNum_ISUSERDEF(type))
+
+#define PyTypeNum_ISOBJECT(type) ((type) == NPY_OBJECT)
+
+
+#define PyDataType_ISBOOL(obj) PyTypeNum_ISBOOL(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISSIGNED(obj) PyTypeNum_ISSIGNED(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISINTEGER(obj) PyTypeNum_ISINTEGER(((PyArray_Descr*)(obj))->type_num )
+#define PyDataType_ISFLOAT(obj) PyTypeNum_ISFLOAT(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISNUMBER(obj) PyTypeNum_ISNUMBER(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISSTRING(obj) PyTypeNum_ISSTRING(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISPYTHON(obj) PyTypeNum_ISPYTHON(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISDATETIME(obj) PyTypeNum_ISDATETIME(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISOBJECT(obj) PyTypeNum_ISOBJECT(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_HASFIELDS(obj) (((PyArray_Descr *)(obj))->names != NULL)
+#define PyDataType_HASSUBARRAY(dtype) ((dtype)->subarray != NULL)
+#define PyDataType_ISUNSIZED(dtype) ((dtype)->elsize == 0)
+#define PyDataType_MAKEUNSIZED(dtype) ((dtype)->elsize = 0)
+
+#define PyArray_ISBOOL(obj) PyTypeNum_ISBOOL(PyArray_TYPE(obj))
+#define PyArray_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(PyArray_TYPE(obj))
+#define PyArray_ISSIGNED(obj) PyTypeNum_ISSIGNED(PyArray_TYPE(obj))
+#define PyArray_ISINTEGER(obj) PyTypeNum_ISINTEGER(PyArray_TYPE(obj))
+#define PyArray_ISFLOAT(obj) PyTypeNum_ISFLOAT(PyArray_TYPE(obj))
+#define PyArray_ISNUMBER(obj) PyTypeNum_ISNUMBER(PyArray_TYPE(obj))
+#define PyArray_ISSTRING(obj) PyTypeNum_ISSTRING(PyArray_TYPE(obj))
+#define PyArray_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(PyArray_TYPE(obj))
+#define PyArray_ISPYTHON(obj) PyTypeNum_ISPYTHON(PyArray_TYPE(obj))
+#define PyArray_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj))
+#define PyArray_ISDATETIME(obj) PyTypeNum_ISDATETIME(PyArray_TYPE(obj))
+#define PyArray_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(PyArray_TYPE(obj))
+#define PyArray_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(PyArray_TYPE(obj))
+#define PyArray_ISOBJECT(obj) PyTypeNum_ISOBJECT(PyArray_TYPE(obj))
+#define PyArray_HASFIELDS(obj) PyDataType_HASFIELDS(PyArray_DESCR(obj))
+
+ /*
+  * FIXME: This should check for a flag on the data-type that
+  * states whether or not it is variable length, because the
+  * ISFLEXIBLE check is hard-coded to the built-in data-types.
+  */
+#define PyArray_ISVARIABLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj))
+
+#define PyArray_SAFEALIGNEDCOPY(obj) (PyArray_ISALIGNED(obj) && !PyArray_ISVARIABLE(obj))
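+
+/*
+ * A minimal dispatch sketch for the classification macros above (the helper
+ * name is hypothetical): branch on the kind of data an array holds before
+ * touching its buffer.
+ */
+#if 0   /* illustrative sketch */
+static const char *
+npy_example_kind(PyArrayObject *arr)
+{
+    if (PyArray_ISCOMPLEX(arr)) {
+        return "complex";
+    }
+    if (PyArray_ISINTEGER(arr)) {
+        return PyArray_ISUNSIGNED(arr) ? "unsigned int" : "signed int";
+    }
+    if (PyArray_ISFLEXIBLE(arr)) {
+        return "flexible";   /* string/unicode/void: per-instance itemsize */
+    }
+    return "other";
+}
+#endif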
+
+
+#define NPY_LITTLE '<'
+#define NPY_BIG '>'
+#define NPY_NATIVE '='
+#define NPY_SWAP 's'
+#define NPY_IGNORE '|'
+
+#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN
+#define NPY_NATBYTE NPY_BIG
+#define NPY_OPPBYTE NPY_LITTLE
+#else
+#define NPY_NATBYTE NPY_LITTLE
+#define NPY_OPPBYTE NPY_BIG
+#endif
+
+#define PyArray_ISNBO(arg) ((arg) != NPY_OPPBYTE)
+#define PyArray_IsNativeByteOrder PyArray_ISNBO
+#define PyArray_ISNOTSWAPPED(m) PyArray_ISNBO(PyArray_DESCR(m)->byteorder)
+#define PyArray_ISBYTESWAPPED(m) (!PyArray_ISNOTSWAPPED(m))
+
+#define PyArray_FLAGSWAP(m, flags) (PyArray_CHKFLAGS(m, flags) && \
+ PyArray_ISNOTSWAPPED(m))
+
+#define PyArray_ISCARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY)
+#define PyArray_ISCARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY_RO)
+#define PyArray_ISFARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY)
+#define PyArray_ISFARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY_RO)
+#define PyArray_ISBEHAVED(m) PyArray_FLAGSWAP(m, NPY_ARRAY_BEHAVED)
+#define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_ALIGNED)
+
+
+#define PyDataType_ISNOTSWAPPED(d) PyArray_ISNBO(((PyArray_Descr *)(d))->byteorder)
+#define PyDataType_ISBYTESWAPPED(d) (!PyDataType_ISNOTSWAPPED(d))
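+
+/*
+ * A minimal byte-order sketch using the macros above (the helper name is
+ * hypothetical): native-order data can be read directly as C types, while
+ * opposite-endian data needs a byteswap on load.
+ */
+#if 0   /* illustrative sketch */
+static int
+npy_example_native(PyArrayObject *arr)
+{
+    if (PyArray_ISBYTESWAPPED(arr)) {
+        return 0;    /* swap each element before interpreting it */
+    }
+    return 1;        /* buffer already holds native C representations */
+}
+#endif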
+
+/************************************************************
+ * A struct used by PyArray_CreateSortedStridePerm, new in 1.7.
+ ************************************************************/
+
+typedef struct {
+ npy_intp perm, stride;
+} npy_stride_sort_item;
+
+/************************************************************
+ * This is the form of the struct pointed to by the PyCObject
+ * attribute of an array's __array_struct__. See
+ * https://docs.scipy.org/doc/numpy/reference/arrays.interface.html for the full
+ * documentation.
+ ************************************************************/
+typedef struct {
+ int two; /*
+ * contains the integer 2 as a sanity
+ * check
+ */
+
+ int nd; /* number of dimensions */
+
+ char typekind; /*
+ * kind in array --- character code of
+ * typestr
+ */
+
+ int itemsize; /* size of each element */
+
+ int flags; /*
+                           * how the data should be interpreted. Valid
+ * flags are CONTIGUOUS (1), F_CONTIGUOUS (2),
+ * ALIGNED (0x100), NOTSWAPPED (0x200), and
+ * WRITEABLE (0x400). ARR_HAS_DESCR (0x800)
+ * states that arrdescr field is present in
+ * structure
+ */
+
+ npy_intp *shape; /*
+ * A length-nd array of shape
+ * information
+ */
+
+ npy_intp *strides; /* A length-nd array of stride information */
+
+ void *data; /* A pointer to the first element of the array */
+
+ PyObject *descr; /*
+ * A list of fields or NULL (ignored if flags
+ * does not have ARR_HAS_DESCR flag set)
+ */
+} PyArrayInterface;
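+
+/*
+ * A minimal consumer sketch for this struct, assuming the producer object
+ * exposes __array_struct__ as a capsule holding a PyArrayInterface pointer
+ * (the Python 3 capsule API is used here, and the helper name is
+ * hypothetical).
+ */
+#if 0   /* illustrative sketch */
+static void
+npy_example_interface(PyObject *producer)
+{
+    PyObject *cap = PyObject_GetAttrString(producer, "__array_struct__");
+    if (cap != NULL) {
+        PyArrayInterface *inter =
+            (PyArrayInterface *)PyCapsule_GetPointer(cap, NULL);
+        if (inter != NULL && inter->two == 2) {   /* sanity check */
+            /* inter->nd, inter->shape, inter->strides and inter->data
+               describe the producer's buffer */
+        }
+        Py_DECREF(cap);
+    }
+}
+#endif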
+
+/*
+ * This is a function for hooking into the PyDataMem_NEW/FREE/RENEW functions.
+ * See the documentation for PyDataMem_SetEventHook.
+ */
+typedef void (PyDataMem_EventHookFunc)(void *inp, void *outp, size_t size,
+ void *user_data);
+
+/*
+ * Use the keyword NPY_DEPRECATED_INCLUDES to ensure that the header files
+ * npy_*_*_deprecated_api.h are only included from here and nowhere else.
+ */
+#ifdef NPY_DEPRECATED_INCLUDES
+#error "Do not use the reserved keyword NPY_DEPRECATED_INCLUDES."
+#endif
+#define NPY_DEPRECATED_INCLUDES
+#if !defined(NPY_NO_DEPRECATED_API) || \
+ (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION)
+#include "npy_1_7_deprecated_api.h"
+#endif
+/*
+ * There is no file npy_1_8_deprecated_api.h since there are no additional
+ * deprecated API features in NumPy 1.8.
+ *
+ * Note to maintainers: insert code like the following in future NumPy
+ * versions.
+ *
+ * #if !defined(NPY_NO_DEPRECATED_API) || \
+ * (NPY_NO_DEPRECATED_API < NPY_1_9_API_VERSION)
+ * #include "npy_1_9_deprecated_api.h"
+ * #endif
+ */
+#undef NPY_DEPRECATED_INCLUDES
+
+#endif /* NPY_ARRAYTYPES_H */
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/noprefix.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/noprefix.h
new file mode 100644
index 0000000..041f301
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/noprefix.h
@@ -0,0 +1,212 @@
+#ifndef NPY_NOPREFIX_H
+#define NPY_NOPREFIX_H
+
+/*
+ * You can directly include noprefix.h as a backward
+ * compatibility measure
+ */
+#ifndef NPY_NO_PREFIX
+#include "ndarrayobject.h"
+#include "npy_interrupt.h"
+#endif
+
+#define SIGSETJMP NPY_SIGSETJMP
+#define SIGLONGJMP NPY_SIGLONGJMP
+#define SIGJMP_BUF NPY_SIGJMP_BUF
+
+#define MAX_DIMS NPY_MAXDIMS
+
+#define longlong npy_longlong
+#define ulonglong npy_ulonglong
+#define Bool npy_bool
+#define longdouble npy_longdouble
+#define byte npy_byte
+
+#ifndef _BSD_SOURCE
+#define ushort npy_ushort
+#define uint npy_uint
+#define ulong npy_ulong
+#endif
+
+#define ubyte npy_ubyte
+#define ushort npy_ushort
+#define uint npy_uint
+#define ulong npy_ulong
+#define cfloat npy_cfloat
+#define cdouble npy_cdouble
+#define clongdouble npy_clongdouble
+#define Int8 npy_int8
+#define UInt8 npy_uint8
+#define Int16 npy_int16
+#define UInt16 npy_uint16
+#define Int32 npy_int32
+#define UInt32 npy_uint32
+#define Int64 npy_int64
+#define UInt64 npy_uint64
+#define Int128 npy_int128
+#define UInt128 npy_uint128
+#define Int256 npy_int256
+#define UInt256 npy_uint256
+#define Float16 npy_float16
+#define Complex32 npy_complex32
+#define Float32 npy_float32
+#define Complex64 npy_complex64
+#define Float64 npy_float64
+#define Complex128 npy_complex128
+#define Float80 npy_float80
+#define Complex160 npy_complex160
+#define Float96 npy_float96
+#define Complex192 npy_complex192
+#define Float128 npy_float128
+#define Complex256 npy_complex256
+#define intp npy_intp
+#define uintp npy_uintp
+#define datetime npy_datetime
+#define timedelta npy_timedelta
+
+#define SIZEOF_LONGLONG NPY_SIZEOF_LONGLONG
+#define SIZEOF_INTP NPY_SIZEOF_INTP
+#define SIZEOF_UINTP NPY_SIZEOF_UINTP
+#define SIZEOF_HALF NPY_SIZEOF_HALF
+#define SIZEOF_LONGDOUBLE NPY_SIZEOF_LONGDOUBLE
+#define SIZEOF_DATETIME NPY_SIZEOF_DATETIME
+#define SIZEOF_TIMEDELTA NPY_SIZEOF_TIMEDELTA
+
+#define LONGLONG_FMT NPY_LONGLONG_FMT
+#define ULONGLONG_FMT NPY_ULONGLONG_FMT
+#define LONGLONG_SUFFIX NPY_LONGLONG_SUFFIX
+#define ULONGLONG_SUFFIX NPY_ULONGLONG_SUFFIX
+
+#define MAX_INT8 127
+#define MIN_INT8 -128
+#define MAX_UINT8 255
+#define MAX_INT16 32767
+#define MIN_INT16 -32768
+#define MAX_UINT16 65535
+#define MAX_INT32 2147483647
+#define MIN_INT32 (-MAX_INT32 - 1)
+#define MAX_UINT32 4294967295U
+#define MAX_INT64 LONGLONG_SUFFIX(9223372036854775807)
+#define MIN_INT64 (-MAX_INT64 - LONGLONG_SUFFIX(1))
+#define MAX_UINT64 ULONGLONG_SUFFIX(18446744073709551615)
+#define MAX_INT128 LONGLONG_SUFFIX(85070591730234615865843651857942052864)
+#define MIN_INT128 (-MAX_INT128 - LONGLONG_SUFFIX(1))
+#define MAX_UINT128 ULONGLONG_SUFFIX(170141183460469231731687303715884105728)
+#define MAX_INT256 LONGLONG_SUFFIX(57896044618658097711785492504343953926634992332820282019728792003956564819967)
+#define MIN_INT256 (-MAX_INT256 - LONGLONG_SUFFIX(1))
+#define MAX_UINT256 ULONGLONG_SUFFIX(115792089237316195423570985008687907853269984665640564039457584007913129639935)
+
+#define MAX_BYTE NPY_MAX_BYTE
+#define MIN_BYTE NPY_MIN_BYTE
+#define MAX_UBYTE NPY_MAX_UBYTE
+#define MAX_SHORT NPY_MAX_SHORT
+#define MIN_SHORT NPY_MIN_SHORT
+#define MAX_USHORT NPY_MAX_USHORT
+#define MAX_INT NPY_MAX_INT
+#define MIN_INT NPY_MIN_INT
+#define MAX_UINT NPY_MAX_UINT
+#define MAX_LONG NPY_MAX_LONG
+#define MIN_LONG NPY_MIN_LONG
+#define MAX_ULONG NPY_MAX_ULONG
+#define MAX_LONGLONG NPY_MAX_LONGLONG
+#define MIN_LONGLONG NPY_MIN_LONGLONG
+#define MAX_ULONGLONG NPY_MAX_ULONGLONG
+#define MIN_DATETIME NPY_MIN_DATETIME
+#define MAX_DATETIME NPY_MAX_DATETIME
+#define MIN_TIMEDELTA NPY_MIN_TIMEDELTA
+#define MAX_TIMEDELTA NPY_MAX_TIMEDELTA
+
+#define BITSOF_BOOL NPY_BITSOF_BOOL
+#define BITSOF_CHAR NPY_BITSOF_CHAR
+#define BITSOF_SHORT NPY_BITSOF_SHORT
+#define BITSOF_INT NPY_BITSOF_INT
+#define BITSOF_LONG NPY_BITSOF_LONG
+#define BITSOF_LONGLONG NPY_BITSOF_LONGLONG
+#define BITSOF_HALF NPY_BITSOF_HALF
+#define BITSOF_FLOAT NPY_BITSOF_FLOAT
+#define BITSOF_DOUBLE NPY_BITSOF_DOUBLE
+#define BITSOF_LONGDOUBLE NPY_BITSOF_LONGDOUBLE
+#define BITSOF_DATETIME NPY_BITSOF_DATETIME
+#define BITSOF_TIMEDELTA NPY_BITSOF_TIMEDELTA
+
+#define _pya_malloc PyArray_malloc
+#define _pya_free PyArray_free
+#define _pya_realloc PyArray_realloc
+
+#define BEGIN_THREADS_DEF NPY_BEGIN_THREADS_DEF
+#define BEGIN_THREADS NPY_BEGIN_THREADS
+#define END_THREADS NPY_END_THREADS
+#define ALLOW_C_API_DEF NPY_ALLOW_C_API_DEF
+#define ALLOW_C_API NPY_ALLOW_C_API
+#define DISABLE_C_API NPY_DISABLE_C_API
+
+#define PY_FAIL NPY_FAIL
+#define PY_SUCCEED NPY_SUCCEED
+
+#ifndef TRUE
+#define TRUE NPY_TRUE
+#endif
+
+#ifndef FALSE
+#define FALSE NPY_FALSE
+#endif
+
+#define LONGDOUBLE_FMT NPY_LONGDOUBLE_FMT
+
+#define CONTIGUOUS NPY_CONTIGUOUS
+#define C_CONTIGUOUS NPY_C_CONTIGUOUS
+#define FORTRAN NPY_FORTRAN
+#define F_CONTIGUOUS NPY_F_CONTIGUOUS
+#define OWNDATA NPY_OWNDATA
+#define FORCECAST NPY_FORCECAST
+#define ENSURECOPY NPY_ENSURECOPY
+#define ENSUREARRAY NPY_ENSUREARRAY
+#define ELEMENTSTRIDES NPY_ELEMENTSTRIDES
+#define ALIGNED NPY_ALIGNED
+#define NOTSWAPPED NPY_NOTSWAPPED
+#define WRITEABLE NPY_WRITEABLE
+#define UPDATEIFCOPY NPY_UPDATEIFCOPY
+#define WRITEBACKIFCOPY NPY_ARRAY_WRITEBACKIFCOPY
+#define ARR_HAS_DESCR NPY_ARR_HAS_DESCR
+#define BEHAVED NPY_BEHAVED
+#define BEHAVED_NS NPY_BEHAVED_NS
+#define CARRAY NPY_CARRAY
+#define CARRAY_RO NPY_CARRAY_RO
+#define FARRAY NPY_FARRAY
+#define FARRAY_RO NPY_FARRAY_RO
+#define DEFAULT NPY_DEFAULT
+#define IN_ARRAY NPY_IN_ARRAY
+#define OUT_ARRAY NPY_OUT_ARRAY
+#define INOUT_ARRAY NPY_INOUT_ARRAY
+#define IN_FARRAY NPY_IN_FARRAY
+#define OUT_FARRAY NPY_OUT_FARRAY
+#define INOUT_FARRAY NPY_INOUT_FARRAY
+#define UPDATE_ALL NPY_UPDATE_ALL
+
+#define OWN_DATA NPY_OWNDATA
+#define BEHAVED_FLAGS NPY_BEHAVED
+#define BEHAVED_FLAGS_NS NPY_BEHAVED_NS
+#define CARRAY_FLAGS_RO NPY_CARRAY_RO
+#define CARRAY_FLAGS NPY_CARRAY
+#define FARRAY_FLAGS NPY_FARRAY
+#define FARRAY_FLAGS_RO NPY_FARRAY_RO
+#define DEFAULT_FLAGS NPY_DEFAULT
+#define UPDATE_ALL_FLAGS NPY_UPDATE_ALL_FLAGS
+
+#ifndef MIN
+#define MIN PyArray_MIN
+#endif
+#ifndef MAX
+#define MAX PyArray_MAX
+#endif
+#define MAX_INTP NPY_MAX_INTP
+#define MIN_INTP NPY_MIN_INTP
+#define MAX_UINTP NPY_MAX_UINTP
+#define INTP_FMT NPY_INTP_FMT
+
+#ifndef PYPY_VERSION
+#define REFCOUNT PyArray_REFCOUNT
+#define MAX_ELSIZE NPY_MAX_ELSIZE
+#endif
+
+#endif
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_1_7_deprecated_api.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_1_7_deprecated_api.h
new file mode 100644
index 0000000..a6ee212
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_1_7_deprecated_api.h
@@ -0,0 +1,133 @@
+#ifndef _NPY_1_7_DEPRECATED_API_H
+#define _NPY_1_7_DEPRECATED_API_H
+
+#ifndef NPY_DEPRECATED_INCLUDES
+#error "Should never include npy_*_*_deprecated_api directly."
+#endif
+
+/* Emit a warning if the user did not specifically request the old API */
+#ifndef NPY_NO_DEPRECATED_API
+#if defined(_WIN32)
+#define _WARN___STR2__(x) #x
+#define _WARN___STR1__(x) _WARN___STR2__(x)
+#define _WARN___LOC__ __FILE__ "(" _WARN___STR1__(__LINE__) ") : Warning Msg: "
+#pragma message(_WARN___LOC__"Using deprecated NumPy API, disable it with " \
+ "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION")
+#elif defined(__GNUC__)
+#warning "Using deprecated NumPy API, disable it with " \
+ "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION"
+#endif
+/* TODO: How to do this warning message for other compilers? */
+#endif
+
+/*
+ * This header exists to collect all dangerous/deprecated NumPy API
+ * as of NumPy 1.7.
+ *
+ * This is an attempt to remove bad API, the proliferation of macros,
+ * and namespace pollution currently produced by the NumPy headers.
+ */
+
+/* These array flags are deprecated as of NumPy 1.7 */
+#define NPY_CONTIGUOUS NPY_ARRAY_C_CONTIGUOUS
+#define NPY_FORTRAN NPY_ARRAY_F_CONTIGUOUS
+
+/*
+ * The consistent NPY_ARRAY_* names which don't pollute the NPY_*
+ * namespace were added in NumPy 1.7.
+ *
+ * These versions of the carray flags are deprecated, but
+ * probably should only be removed after two releases instead of one.
+ */
+#define NPY_C_CONTIGUOUS NPY_ARRAY_C_CONTIGUOUS
+#define NPY_F_CONTIGUOUS NPY_ARRAY_F_CONTIGUOUS
+#define NPY_OWNDATA NPY_ARRAY_OWNDATA
+#define NPY_FORCECAST NPY_ARRAY_FORCECAST
+#define NPY_ENSURECOPY NPY_ARRAY_ENSURECOPY
+#define NPY_ENSUREARRAY NPY_ARRAY_ENSUREARRAY
+#define NPY_ELEMENTSTRIDES NPY_ARRAY_ELEMENTSTRIDES
+#define NPY_ALIGNED NPY_ARRAY_ALIGNED
+#define NPY_NOTSWAPPED NPY_ARRAY_NOTSWAPPED
+#define NPY_WRITEABLE NPY_ARRAY_WRITEABLE
+#define NPY_UPDATEIFCOPY NPY_ARRAY_UPDATEIFCOPY
+#define NPY_BEHAVED NPY_ARRAY_BEHAVED
+#define NPY_BEHAVED_NS NPY_ARRAY_BEHAVED_NS
+#define NPY_CARRAY NPY_ARRAY_CARRAY
+#define NPY_CARRAY_RO NPY_ARRAY_CARRAY_RO
+#define NPY_FARRAY NPY_ARRAY_FARRAY
+#define NPY_FARRAY_RO NPY_ARRAY_FARRAY_RO
+#define NPY_DEFAULT NPY_ARRAY_DEFAULT
+#define NPY_IN_ARRAY NPY_ARRAY_IN_ARRAY
+#define NPY_OUT_ARRAY NPY_ARRAY_OUT_ARRAY
+#define NPY_INOUT_ARRAY NPY_ARRAY_INOUT_ARRAY
+#define NPY_IN_FARRAY NPY_ARRAY_IN_FARRAY
+#define NPY_OUT_FARRAY NPY_ARRAY_OUT_FARRAY
+#define NPY_INOUT_FARRAY NPY_ARRAY_INOUT_FARRAY
+#define NPY_UPDATE_ALL NPY_ARRAY_UPDATE_ALL
+
+/* This way of accessing the default type is deprecated as of NumPy 1.7 */
+#define PyArray_DEFAULT NPY_DEFAULT_TYPE
+
+/* These DATETIME bits aren't used internally */
+#if PY_VERSION_HEX >= 0x03000000
+#define PyDataType_GetDatetimeMetaData(descr) \
+ ((descr->metadata == NULL) ? NULL : \
+ ((PyArray_DatetimeMetaData *)(PyCapsule_GetPointer( \
+ PyDict_GetItemString( \
+ descr->metadata, NPY_METADATA_DTSTR), NULL))))
+#else
+#define PyDataType_GetDatetimeMetaData(descr) \
+ ((descr->metadata == NULL) ? NULL : \
+ ((PyArray_DatetimeMetaData *)(PyCObject_AsVoidPtr( \
+ PyDict_GetItemString(descr->metadata, NPY_METADATA_DTSTR)))))
+#endif
+
+/*
+ * Deprecated as of NumPy 1.7, this kind of shortcut doesn't
+ * belong in the public API.
+ */
+#define NPY_AO PyArrayObject
+
+/*
+ * Deprecated as of NumPy 1.7, an all-lowercase macro doesn't
+ * belong in the public API.
+ */
+#define fortran fortran_
+
+/*
+ * Deprecated as of NumPy 1.7, as it is a namespace-polluting
+ * macro.
+ */
+#define FORTRAN_IF PyArray_FORTRAN_IF
+
+/* Deprecated as of NumPy 1.7, datetime64 uses c_metadata instead */
+#define NPY_METADATA_DTSTR "__timeunit__"
+
+/*
+ * Deprecated as of NumPy 1.7.
+ * The reasoning:
+ * - These are for datetime, but there's no datetime "namespace".
+ * - They just turn NPY_STR_<x> into "<x>", which is just
+ * making something simple be indirected.
+ */
+#define NPY_STR_Y "Y"
+#define NPY_STR_M "M"
+#define NPY_STR_W "W"
+#define NPY_STR_D "D"
+#define NPY_STR_h "h"
+#define NPY_STR_m "m"
+#define NPY_STR_s "s"
+#define NPY_STR_ms "ms"
+#define NPY_STR_us "us"
+#define NPY_STR_ns "ns"
+#define NPY_STR_ps "ps"
+#define NPY_STR_fs "fs"
+#define NPY_STR_as "as"
+
+/*
+ * The macros in old_defines.h are Deprecated as of NumPy 1.7 and will be
+ * removed in the next major release.
+ */
+#include "old_defines.h"
+
+#endif
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_3kcompat.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_3kcompat.h
new file mode 100644
index 0000000..832bc05
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_3kcompat.h
@@ -0,0 +1,577 @@
+/*
+ * This is a convenience header file providing compatibility utilities
+ * for supporting Python 2 and Python 3 in the same code base.
+ *
+ * If you want to use this for your own projects, it's recommended to make a
+ * copy of it. Although the stuff below is unlikely to change, we don't provide
+ * strong backwards compatibility guarantees at the moment.
+ */
+
+#ifndef _NPY_3KCOMPAT_H_
+#define _NPY_3KCOMPAT_H_
+
+#include <Python.h>
+#include <stdio.h>
+
+#if PY_VERSION_HEX >= 0x03000000
+#ifndef NPY_PY3K
+#define NPY_PY3K 1
+#endif
+#endif
+
+#include "numpy/npy_common.h"
+#include "numpy/ndarrayobject.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * PyInt -> PyLong
+ */
+
+#if defined(NPY_PY3K)
+/* Return True only if the long fits in a C long */
+static NPY_INLINE int PyInt_Check(PyObject *op) {
+ int overflow = 0;
+ if (!PyLong_Check(op)) {
+ return 0;
+ }
+ PyLong_AsLongAndOverflow(op, &overflow);
+ return (overflow == 0);
+}
+
+#define PyInt_FromLong PyLong_FromLong
+#define PyInt_AsLong PyLong_AsLong
+#define PyInt_AS_LONG PyLong_AsLong
+#define PyInt_AsSsize_t PyLong_AsSsize_t
+
+/* NOTE:
+ *
+ * Since the PyLong type is very different from the fixed-range PyInt,
+ * we don't define PyInt_Type -> PyLong_Type.
+ */
+#endif /* NPY_PY3K */
+
+/* Py3 changes PySlice_GetIndicesEx's first argument's type to PyObject* */
+#ifdef NPY_PY3K
+# define NpySlice_GetIndicesEx PySlice_GetIndicesEx
+#else
+# define NpySlice_GetIndicesEx(op, nop, start, end, step, slicelength) \
+ PySlice_GetIndicesEx((PySliceObject *)op, nop, start, end, step, slicelength)
+#endif
+
+/* <2.7.11 and <3.4.4 have the wrong argument type for Py_EnterRecursiveCall */
+#if (PY_VERSION_HEX < 0x02070B00) || \
+ ((0x03000000 <= PY_VERSION_HEX) && (PY_VERSION_HEX < 0x03040400))
+ #define Npy_EnterRecursiveCall(x) Py_EnterRecursiveCall((char *)(x))
+#else
+ #define Npy_EnterRecursiveCall(x) Py_EnterRecursiveCall(x)
+#endif
+
+/* Py_SETREF was added in 3.5.2, and only if Py_LIMITED_API is absent */
+#if PY_VERSION_HEX < 0x03050200
+ #define Py_SETREF(op, op2) \
+ do { \
+ PyObject *_py_tmp = (PyObject *)(op); \
+ (op) = (op2); \
+ Py_DECREF(_py_tmp); \
+ } while (0)
+#endif
+
+/*
+ * PyString -> PyBytes
+ */
+
+#if defined(NPY_PY3K)
+
+#define PyString_Type PyBytes_Type
+#define PyString_Check PyBytes_Check
+#define PyStringObject PyBytesObject
+#define PyString_FromString PyBytes_FromString
+#define PyString_FromStringAndSize PyBytes_FromStringAndSize
+#define PyString_AS_STRING PyBytes_AS_STRING
+#define PyString_AsStringAndSize PyBytes_AsStringAndSize
+#define PyString_FromFormat PyBytes_FromFormat
+#define PyString_Concat PyBytes_Concat
+#define PyString_ConcatAndDel PyBytes_ConcatAndDel
+#define PyString_AsString PyBytes_AsString
+#define PyString_GET_SIZE PyBytes_GET_SIZE
+#define PyString_Size PyBytes_Size
+
+#define PyUString_Type PyUnicode_Type
+#define PyUString_Check PyUnicode_Check
+#define PyUStringObject PyUnicodeObject
+#define PyUString_FromString PyUnicode_FromString
+#define PyUString_FromStringAndSize PyUnicode_FromStringAndSize
+#define PyUString_FromFormat PyUnicode_FromFormat
+#define PyUString_Concat PyUnicode_Concat2
+#define PyUString_ConcatAndDel PyUnicode_ConcatAndDel
+#define PyUString_GET_SIZE PyUnicode_GET_SIZE
+#define PyUString_Size PyUnicode_Size
+#define PyUString_InternFromString PyUnicode_InternFromString
+#define PyUString_Format PyUnicode_Format
+
+#define PyBaseString_Check(obj) (PyUnicode_Check(obj))
+
+#else
+
+#define PyBytes_Type PyString_Type
+#define PyBytes_Check PyString_Check
+#define PyBytesObject PyStringObject
+#define PyBytes_FromString PyString_FromString
+#define PyBytes_FromStringAndSize PyString_FromStringAndSize
+#define PyBytes_AS_STRING PyString_AS_STRING
+#define PyBytes_AsStringAndSize PyString_AsStringAndSize
+#define PyBytes_FromFormat PyString_FromFormat
+#define PyBytes_Concat PyString_Concat
+#define PyBytes_ConcatAndDel PyString_ConcatAndDel
+#define PyBytes_AsString PyString_AsString
+#define PyBytes_GET_SIZE PyString_GET_SIZE
+#define PyBytes_Size PyString_Size
+
+#define PyUString_Type PyString_Type
+#define PyUString_Check PyString_Check
+#define PyUStringObject PyStringObject
+#define PyUString_FromString PyString_FromString
+#define PyUString_FromStringAndSize PyString_FromStringAndSize
+#define PyUString_FromFormat PyString_FromFormat
+#define PyUString_Concat PyString_Concat
+#define PyUString_ConcatAndDel PyString_ConcatAndDel
+#define PyUString_GET_SIZE PyString_GET_SIZE
+#define PyUString_Size PyString_Size
+#define PyUString_InternFromString PyString_InternFromString
+#define PyUString_Format PyString_Format
+
+#define PyBaseString_Check(obj) (PyBytes_Check(obj) || PyUnicode_Check(obj))
+
+#endif /* NPY_PY3K */
+
+
+static NPY_INLINE void
+PyUnicode_ConcatAndDel(PyObject **left, PyObject *right)
+{
+ Py_SETREF(*left, PyUnicode_Concat(*left, right));
+ Py_DECREF(right);
+}
+
+static NPY_INLINE void
+PyUnicode_Concat2(PyObject **left, PyObject *right)
+{
+ Py_SETREF(*left, PyUnicode_Concat(*left, right));
+}
+
+/*
+ * PyFile_* compatibility
+ */
+
+/*
+ * Get a FILE* handle to the file represented by the Python object
+ */
+static NPY_INLINE FILE*
+npy_PyFile_Dup2(PyObject *file, char *mode, npy_off_t *orig_pos)
+{
+ int fd, fd2, unbuf;
+ PyObject *ret, *os, *io, *io_raw;
+ npy_off_t pos;
+ FILE *handle;
+
+ /* For Python 2 PyFileObject, use PyFile_AsFile */
+#if !defined(NPY_PY3K)
+ if (PyFile_Check(file)) {
+ return PyFile_AsFile(file);
+ }
+#endif
+
+ /* Flush first to ensure things end up in the file in the correct order */
+ ret = PyObject_CallMethod(file, "flush", "");
+ if (ret == NULL) {
+ return NULL;
+ }
+ Py_DECREF(ret);
+ fd = PyObject_AsFileDescriptor(file);
+ if (fd == -1) {
+ return NULL;
+ }
+
+ /*
+ * The handle needs to be dup'd because we have to call fclose
+ * at the end
+ */
+ os = PyImport_ImportModule("os");
+ if (os == NULL) {
+ return NULL;
+ }
+ ret = PyObject_CallMethod(os, "dup", "i", fd);
+ Py_DECREF(os);
+ if (ret == NULL) {
+ return NULL;
+ }
+ fd2 = PyNumber_AsSsize_t(ret, NULL);
+ Py_DECREF(ret);
+
+ /* Convert to FILE* handle */
+#ifdef _WIN32
+ handle = _fdopen(fd2, mode);
+#else
+ handle = fdopen(fd2, mode);
+#endif
+ if (handle == NULL) {
+ PyErr_SetString(PyExc_IOError,
+ "Getting a FILE* from a Python file object failed");
+ return NULL;
+ }
+
+ /* Record the original raw file handle position */
+ *orig_pos = npy_ftell(handle);
+ if (*orig_pos == -1) {
+ /* The io module is needed to determine if buffering is used */
+ io = PyImport_ImportModule("io");
+ if (io == NULL) {
+ fclose(handle);
+ return NULL;
+ }
+ /* File object instances of RawIOBase are unbuffered */
+ io_raw = PyObject_GetAttrString(io, "RawIOBase");
+ Py_DECREF(io);
+ if (io_raw == NULL) {
+ fclose(handle);
+ return NULL;
+ }
+ unbuf = PyObject_IsInstance(file, io_raw);
+ Py_DECREF(io_raw);
+ if (unbuf == 1) {
+ /* Succeed if the IO is unbuffered */
+ return handle;
+ }
+ else {
+ PyErr_SetString(PyExc_IOError, "obtaining file position failed");
+ fclose(handle);
+ return NULL;
+ }
+ }
+
+ /* Seek raw handle to the Python-side position */
+ ret = PyObject_CallMethod(file, "tell", "");
+ if (ret == NULL) {
+ fclose(handle);
+ return NULL;
+ }
+ pos = PyLong_AsLongLong(ret);
+ Py_DECREF(ret);
+ if (PyErr_Occurred()) {
+ fclose(handle);
+ return NULL;
+ }
+ if (npy_fseek(handle, pos, SEEK_SET) == -1) {
+ PyErr_SetString(PyExc_IOError, "seeking file failed");
+ fclose(handle);
+ return NULL;
+ }
+ return handle;
+}
+
+/*
+ * Close the dup-ed file handle, and seek the Python one to the current position
+ */
+static NPY_INLINE int
+npy_PyFile_DupClose2(PyObject *file, FILE* handle, npy_off_t orig_pos)
+{
+ int fd, unbuf;
+ PyObject *ret, *io, *io_raw;
+ npy_off_t position;
+
+ /* For Python 2 PyFileObject, do nothing */
+#if !defined(NPY_PY3K)
+ if (PyFile_Check(file)) {
+ return 0;
+ }
+#endif
+
+ position = npy_ftell(handle);
+
+ /* Close the FILE* handle */
+ fclose(handle);
+
+ /*
+ * Restore original file handle position, in order to not confuse
+ * Python-side data structures
+ */
+ fd = PyObject_AsFileDescriptor(file);
+ if (fd == -1) {
+ return -1;
+ }
+
+ if (npy_lseek(fd, orig_pos, SEEK_SET) == -1) {
+
+ /* The io module is needed to determine if buffering is used */
+ io = PyImport_ImportModule("io");
+ if (io == NULL) {
+ return -1;
+ }
+ /* File object instances of RawIOBase are unbuffered */
+ io_raw = PyObject_GetAttrString(io, "RawIOBase");
+ Py_DECREF(io);
+ if (io_raw == NULL) {
+ return -1;
+ }
+ unbuf = PyObject_IsInstance(file, io_raw);
+ Py_DECREF(io_raw);
+ if (unbuf == 1) {
+ /* Succeed if the IO is unbuffered */
+ return 0;
+ }
+ else {
+ PyErr_SetString(PyExc_IOError, "seeking file failed");
+ return -1;
+ }
+ }
+
+ if (position == -1) {
+ PyErr_SetString(PyExc_IOError, "obtaining file position failed");
+ return -1;
+ }
+
+ /* Seek Python-side handle to the FILE* handle position */
+ ret = PyObject_CallMethod(file, "seek", NPY_OFF_T_PYFMT "i", position, 0);
+ if (ret == NULL) {
+ return -1;
+ }
+ Py_DECREF(ret);
+ return 0;
+}
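+
+/*
+ * A minimal sketch of the dup/close pair above (the helper name is
+ * hypothetical): borrow a FILE* from a Python file object, do stdio I/O on
+ * it, then resynchronize the Python side's file position.
+ */
+#if 0   /* illustrative sketch */
+static int
+npy_example_read(PyObject *file, void *buf, size_t nbytes)
+{
+    npy_off_t orig_pos;
+    FILE *fp = npy_PyFile_Dup2(file, "rb", &orig_pos);
+    if (fp == NULL) {
+        return -1;
+    }
+    if (fread(buf, 1, nbytes, fp) != nbytes) {
+        /* short read; fall through and still restore the position */
+    }
+    return npy_PyFile_DupClose2(file, fp, orig_pos);
+}
+#endif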
+
+static NPY_INLINE int
+npy_PyFile_Check(PyObject *file)
+{
+ int fd;
+ /* For Python 2, check if it is a PyFileObject */
+#if !defined(NPY_PY3K)
+ if (PyFile_Check(file)) {
+ return 1;
+ }
+#endif
+ fd = PyObject_AsFileDescriptor(file);
+ if (fd == -1) {
+ PyErr_Clear();
+ return 0;
+ }
+ return 1;
+}
+
+static NPY_INLINE PyObject*
+npy_PyFile_OpenFile(PyObject *filename, const char *mode)
+{
+ PyObject *open;
+ open = PyDict_GetItemString(PyEval_GetBuiltins(), "open");
+ if (open == NULL) {
+ return NULL;
+ }
+ return PyObject_CallFunction(open, "Os", filename, mode);
+}
+
+static NPY_INLINE int
+npy_PyFile_CloseFile(PyObject *file)
+{
+ PyObject *ret;
+
+ ret = PyObject_CallMethod(file, "close", NULL);
+ if (ret == NULL) {
+ return -1;
+ }
+ Py_DECREF(ret);
+ return 0;
+}
+
+
+/* This is a copy of _PyErr_ChainExceptions
+ */
+static NPY_INLINE void
+npy_PyErr_ChainExceptions(PyObject *exc, PyObject *val, PyObject *tb)
+{
+ if (exc == NULL)
+ return;
+
+ if (PyErr_Occurred()) {
+ /* only py3 supports this anyway */
+ #ifdef NPY_PY3K
+ PyObject *exc2, *val2, *tb2;
+ PyErr_Fetch(&exc2, &val2, &tb2);
+ PyErr_NormalizeException(&exc, &val, &tb);
+ if (tb != NULL) {
+ PyException_SetTraceback(val, tb);
+ Py_DECREF(tb);
+ }
+ Py_DECREF(exc);
+ PyErr_NormalizeException(&exc2, &val2, &tb2);
+ PyException_SetContext(val2, val);
+ PyErr_Restore(exc2, val2, tb2);
+ #endif
+ }
+ else {
+ PyErr_Restore(exc, val, tb);
+ }
+}
+
+
+/* This is a copy of _PyErr_ChainExceptions, with:
+ * - a minimal implementation for python 2
+ * - __cause__ used instead of __context__
+ */
+static NPY_INLINE void
+npy_PyErr_ChainExceptionsCause(PyObject *exc, PyObject *val, PyObject *tb)
+{
+ if (exc == NULL)
+ return;
+
+ if (PyErr_Occurred()) {
+ /* only py3 supports this anyway */
+ #ifdef NPY_PY3K
+ PyObject *exc2, *val2, *tb2;
+ PyErr_Fetch(&exc2, &val2, &tb2);
+ PyErr_NormalizeException(&exc, &val, &tb);
+ if (tb != NULL) {
+ PyException_SetTraceback(val, tb);
+ Py_DECREF(tb);
+ }
+ Py_DECREF(exc);
+ PyErr_NormalizeException(&exc2, &val2, &tb2);
+ PyException_SetCause(val2, val);
+ PyErr_Restore(exc2, val2, tb2);
+ #endif
+ }
+ else {
+ PyErr_Restore(exc, val, tb);
+ }
+}
+
+/*
+ * PyObject_Cmp
+ */
+#if defined(NPY_PY3K)
+static NPY_INLINE int
+PyObject_Cmp(PyObject *i1, PyObject *i2, int *cmp)
+{
+ int v;
+ v = PyObject_RichCompareBool(i1, i2, Py_LT);
+ if (v == 1) {
+ *cmp = -1;
+ return 1;
+ }
+ else if (v == -1) {
+ return -1;
+ }
+
+ v = PyObject_RichCompareBool(i1, i2, Py_GT);
+ if (v == 1) {
+ *cmp = 1;
+ return 1;
+ }
+ else if (v == -1) {
+ return -1;
+ }
+
+ v = PyObject_RichCompareBool(i1, i2, Py_EQ);
+ if (v == 1) {
+ *cmp = 0;
+ return 1;
+ }
+ else {
+ *cmp = 0;
+ return -1;
+ }
+}
+#endif
+
+/*
+ * PyCObject functions adapted to PyCapsules.
+ *
+ * The main job here is to get rid of the improved error handling
+ * of PyCapsules. It's a shame...
+ */
+#if PY_VERSION_HEX >= 0x03000000
+
+static NPY_INLINE PyObject *
+NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *))
+{
+ PyObject *ret = PyCapsule_New(ptr, NULL, dtor);
+ if (ret == NULL) {
+ PyErr_Clear();
+ }
+ return ret;
+}
+
+static NPY_INLINE PyObject *
+NpyCapsule_FromVoidPtrAndDesc(void *ptr, void* context, void (*dtor)(PyObject *))
+{
+ PyObject *ret = NpyCapsule_FromVoidPtr(ptr, dtor);
+ if (ret != NULL && PyCapsule_SetContext(ret, context) != 0) {
+ PyErr_Clear();
+ Py_DECREF(ret);
+ ret = NULL;
+ }
+ return ret;
+}
+
+static NPY_INLINE void *
+NpyCapsule_AsVoidPtr(PyObject *obj)
+{
+ void *ret = PyCapsule_GetPointer(obj, NULL);
+ if (ret == NULL) {
+ PyErr_Clear();
+ }
+ return ret;
+}
+
+static NPY_INLINE void *
+NpyCapsule_GetDesc(PyObject *obj)
+{
+ return PyCapsule_GetContext(obj);
+}
+
+static NPY_INLINE int
+NpyCapsule_Check(PyObject *ptr)
+{
+ return PyCapsule_CheckExact(ptr);
+}
+
+#else
+
+static NPY_INLINE PyObject *
+NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(void *))
+{
+ return PyCObject_FromVoidPtr(ptr, dtor);
+}
+
+static NPY_INLINE PyObject *
+NpyCapsule_FromVoidPtrAndDesc(void *ptr, void* context,
+ void (*dtor)(void *, void *))
+{
+ return PyCObject_FromVoidPtrAndDesc(ptr, context, dtor);
+}
+
+static NPY_INLINE void *
+NpyCapsule_AsVoidPtr(PyObject *ptr)
+{
+ return PyCObject_AsVoidPtr(ptr);
+}
+
+static NPY_INLINE void *
+NpyCapsule_GetDesc(PyObject *obj)
+{
+ return PyCObject_GetDesc(obj);
+}
+
+static NPY_INLINE int
+NpyCapsule_Check(PyObject *ptr)
+{
+ return PyCObject_Check(ptr);
+}
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _NPY_3KCOMPAT_H_ */
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_common.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_common.h
new file mode 100644
index 0000000..64aaaac
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_common.h
@@ -0,0 +1,1083 @@
+#ifndef _NPY_COMMON_H_
+#define _NPY_COMMON_H_
+
+/* numpyconfig.h is auto-generated */
+#include "numpyconfig.h"
+#ifdef HAVE_NPY_CONFIG_H
+#include <npy_config.h>
+#endif
+
+/* need Python.h for npy_intp, npy_uintp */
+#include <Python.h>
+
+/*
+ * using static inline modifiers when defining npy_math functions
+ * allows the compiler to make optimizations when possible
+ */
+#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD
+#ifndef NPY_INLINE_MATH
+#define NPY_INLINE_MATH 1
+#endif
+#endif
+
+/*
+ * gcc does not unroll even with -O3
+ * use with care, unrolling on modern cpus rarely speeds things up
+ */
+#ifdef HAVE_ATTRIBUTE_OPTIMIZE_UNROLL_LOOPS
+#define NPY_GCC_UNROLL_LOOPS \
+ __attribute__((optimize("unroll-loops")))
+#else
+#define NPY_GCC_UNROLL_LOOPS
+#endif
+
+/* highest gcc optimization level, enabled autovectorizer */
+#ifdef HAVE_ATTRIBUTE_OPTIMIZE_OPT_3
+#define NPY_GCC_OPT_3 __attribute__((optimize("O3")))
+#else
+#define NPY_GCC_OPT_3
+#endif
+
+/* compile target attributes */
+#if defined HAVE_ATTRIBUTE_TARGET_AVX && defined HAVE_LINK_AVX
+#define NPY_GCC_TARGET_AVX __attribute__((target("avx")))
+#else
+#define NPY_GCC_TARGET_AVX
+#endif
+#if defined HAVE_ATTRIBUTE_TARGET_AVX2 && defined HAVE_LINK_AVX2
+#define NPY_GCC_TARGET_AVX2 __attribute__((target("avx2")))
+#else
+#define NPY_GCC_TARGET_AVX2
+#endif
+
+/*
+ * mark an argument (starting from 1) that must not be NULL and is not checked
+ * DO NOT USE IF FUNCTION CHECKS FOR NULL!! the compiler will remove the check
+ */
+#ifdef HAVE_ATTRIBUTE_NONNULL
+#define NPY_GCC_NONNULL(n) __attribute__((nonnull(n)))
+#else
+#define NPY_GCC_NONNULL(n)
+#endif
+
+#if defined HAVE_XMMINTRIN_H && defined HAVE__MM_LOAD_PS
+#define NPY_HAVE_SSE_INTRINSICS
+#endif
+
+#if defined HAVE_EMMINTRIN_H && defined HAVE__MM_LOAD_PD
+#define NPY_HAVE_SSE2_INTRINSICS
+#endif
+
+/*
+ * give a hint to the compiler which branch is more likely or unlikely
+ * to occur, e.g. rare error cases:
+ *
+ * if (NPY_UNLIKELY(failure == 0))
+ * return NULL;
+ *
+ * the double !! is to cast the expression (e.g. NULL) to a boolean required by
+ * the intrinsic
+ */
+#ifdef HAVE___BUILTIN_EXPECT
+#define NPY_LIKELY(x) __builtin_expect(!!(x), 1)
+#define NPY_UNLIKELY(x) __builtin_expect(!!(x), 0)
+#else
+#define NPY_LIKELY(x) (x)
+#define NPY_UNLIKELY(x) (x)
+#endif
+
+#ifdef HAVE___BUILTIN_PREFETCH
+/* unlike _mm_prefetch also works on non-x86 */
+#define NPY_PREFETCH(x, rw, loc) __builtin_prefetch((x), (rw), (loc))
+#else
+#ifdef HAVE__MM_PREFETCH
+/* _MM_HINT_ET[01] (rw = 1) unsupported, only available in gcc >= 4.9 */
+#define NPY_PREFETCH(x, rw, loc) _mm_prefetch((x), loc == 0 ? _MM_HINT_NTA : \
+ (loc == 1 ? _MM_HINT_T2 : \
+ (loc == 2 ? _MM_HINT_T1 : \
+ (loc == 3 ? _MM_HINT_T0 : -1))))
+#else
+#define NPY_PREFETCH(x, rw, loc)
+#endif
+#endif
+
+#if defined(_MSC_VER)
+ #define NPY_INLINE __inline
+#elif defined(__GNUC__)
+ #if defined(__STRICT_ANSI__)
+ #define NPY_INLINE __inline__
+ #else
+ #define NPY_INLINE inline
+ #endif
+#else
+ #define NPY_INLINE
+#endif
+
+#ifdef HAVE___THREAD
+ #define NPY_TLS __thread
+#else
+ #ifdef HAVE___DECLSPEC_THREAD_
+ #define NPY_TLS __declspec(thread)
+ #else
+ #define NPY_TLS
+ #endif
+#endif
+
+#ifdef WITH_CPYCHECKER_RETURNS_BORROWED_REF_ATTRIBUTE
+ #define NPY_RETURNS_BORROWED_REF \
+ __attribute__((cpychecker_returns_borrowed_ref))
+#else
+ #define NPY_RETURNS_BORROWED_REF
+#endif
+
+#ifdef WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE
+ #define NPY_STEALS_REF_TO_ARG(n) \
+ __attribute__((cpychecker_steals_reference_to_arg(n)))
+#else
+ #define NPY_STEALS_REF_TO_ARG(n)
+#endif
+
+/* 64 bit file position support, also on win-amd64. Ticket #1660 */
+#if defined(_MSC_VER) && defined(_WIN64) && (_MSC_VER > 1400) || \
+ defined(__MINGW32__) || defined(__MINGW64__)
+ #include <io.h>
+
+/* mingw based on 3.4.5 has lseek but not ftell/fseek */
+#if defined(__MINGW32__) || defined(__MINGW64__)
+extern int __cdecl _fseeki64(FILE *, long long, int);
+extern long long __cdecl _ftelli64(FILE *);
+#endif
+
+ #define npy_fseek _fseeki64
+ #define npy_ftell _ftelli64
+ #define npy_lseek _lseeki64
+ #define npy_off_t npy_int64
+
+ #if NPY_SIZEOF_INT == 8
+ #define NPY_OFF_T_PYFMT "i"
+ #elif NPY_SIZEOF_LONG == 8
+ #define NPY_OFF_T_PYFMT "l"
+ #elif NPY_SIZEOF_LONGLONG == 8
+ #define NPY_OFF_T_PYFMT "L"
+ #else
+ #error Unsupported size for type off_t
+ #endif
+#else
+#ifdef HAVE_FSEEKO
+ #define npy_fseek fseeko
+#else
+ #define npy_fseek fseek
+#endif
+#ifdef HAVE_FTELLO
+ #define npy_ftell ftello
+#else
+ #define npy_ftell ftell
+#endif
+ #include <sys/types.h>
+ #define npy_lseek lseek
+ #define npy_off_t off_t
+
+ #if NPY_SIZEOF_OFF_T == NPY_SIZEOF_SHORT
+ #define NPY_OFF_T_PYFMT "h"
+ #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_INT
+ #define NPY_OFF_T_PYFMT "i"
+ #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_LONG
+ #define NPY_OFF_T_PYFMT "l"
+ #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_LONGLONG
+ #define NPY_OFF_T_PYFMT "L"
+ #else
+ #error Unsupported size for type off_t
+ #endif
+#endif
+
+/* enums for detected endianness */
+enum {
+ NPY_CPU_UNKNOWN_ENDIAN,
+ NPY_CPU_LITTLE,
+ NPY_CPU_BIG
+};
+
+/*
+ * This is to typedef npy_intp to the appropriate pointer size for this
+ * platform. Py_intptr_t, Py_uintptr_t are defined in pyport.h.
+ */
+typedef Py_intptr_t npy_intp;
+typedef Py_uintptr_t npy_uintp;
+
+/*
+ * Define sizes that were not defined in numpyconfig.h.
+ */
+#define NPY_SIZEOF_CHAR 1
+#define NPY_SIZEOF_BYTE 1
+#define NPY_SIZEOF_DATETIME 8
+#define NPY_SIZEOF_TIMEDELTA 8
+#define NPY_SIZEOF_INTP NPY_SIZEOF_PY_INTPTR_T
+#define NPY_SIZEOF_UINTP NPY_SIZEOF_PY_INTPTR_T
+#define NPY_SIZEOF_HALF 2
+#define NPY_SIZEOF_CFLOAT NPY_SIZEOF_COMPLEX_FLOAT
+#define NPY_SIZEOF_CDOUBLE NPY_SIZEOF_COMPLEX_DOUBLE
+#define NPY_SIZEOF_CLONGDOUBLE NPY_SIZEOF_COMPLEX_LONGDOUBLE
+
+#ifdef constchar
+#undef constchar
+#endif
+
+#define NPY_SSIZE_T_PYFMT "n"
+#define constchar char
+
+/* NPY_INTP_FMT Note:
+ * Unlike the other NPY_*_FMT macros which are used with
+ * PyOS_snprintf, NPY_INTP_FMT is used with PyErr_Format and
+ * PyString_Format. These functions use different formatting
+ * codes which are portably specified according to the Python
+ * documentation. See ticket #1795.
+ */
+#if NPY_SIZEOF_PY_INTPTR_T == NPY_SIZEOF_INT
+ #define NPY_INTP NPY_INT
+ #define NPY_UINTP NPY_UINT
+ #define PyIntpArrType_Type PyIntArrType_Type
+ #define PyUIntpArrType_Type PyUIntArrType_Type
+ #define NPY_MAX_INTP NPY_MAX_INT
+ #define NPY_MIN_INTP NPY_MIN_INT
+ #define NPY_MAX_UINTP NPY_MAX_UINT
+ #define NPY_INTP_FMT "d"
+#elif NPY_SIZEOF_PY_INTPTR_T == NPY_SIZEOF_LONG
+ #define NPY_INTP NPY_LONG
+ #define NPY_UINTP NPY_ULONG
+ #define PyIntpArrType_Type PyLongArrType_Type
+ #define PyUIntpArrType_Type PyULongArrType_Type
+ #define NPY_MAX_INTP NPY_MAX_LONG
+ #define NPY_MIN_INTP NPY_MIN_LONG
+ #define NPY_MAX_UINTP NPY_MAX_ULONG
+ #define NPY_INTP_FMT "ld"
+#elif defined(PY_LONG_LONG) && (NPY_SIZEOF_PY_INTPTR_T == NPY_SIZEOF_LONGLONG)
+ #define NPY_INTP NPY_LONGLONG
+ #define NPY_UINTP NPY_ULONGLONG
+ #define PyIntpArrType_Type PyLongLongArrType_Type
+ #define PyUIntpArrType_Type PyULongLongArrType_Type
+ #define NPY_MAX_INTP NPY_MAX_LONGLONG
+ #define NPY_MIN_INTP NPY_MIN_LONGLONG
+ #define NPY_MAX_UINTP NPY_MAX_ULONGLONG
+ #define NPY_INTP_FMT "lld"
+#endif
+
+/*
+ * We can only use C99 formats for npy_intp if it is the same as
+ * intptr_t, hence the condition on HAVE_UINTPTR_T
+ */
+#if (NPY_USE_C99_FORMATS) == 1 \
+ && (defined HAVE_UINTPTR_T) \
+ && (defined HAVE_INTTYPES_H)
+ #include <inttypes.h>
+ #undef NPY_INTP_FMT
+ #define NPY_INTP_FMT PRIdPTR
+#endif
+
+
+/*
+ * Some platforms don't define bool, long long, or long double.
+ * Handle that here.
+ */
+#define NPY_BYTE_FMT "hhd"
+#define NPY_UBYTE_FMT "hhu"
+#define NPY_SHORT_FMT "hd"
+#define NPY_USHORT_FMT "hu"
+#define NPY_INT_FMT "d"
+#define NPY_UINT_FMT "u"
+#define NPY_LONG_FMT "ld"
+#define NPY_ULONG_FMT "lu"
+#define NPY_HALF_FMT "g"
+#define NPY_FLOAT_FMT "g"
+#define NPY_DOUBLE_FMT "g"
+
+
+#ifdef PY_LONG_LONG
+typedef PY_LONG_LONG npy_longlong;
+typedef unsigned PY_LONG_LONG npy_ulonglong;
+# ifdef _MSC_VER
+# define NPY_LONGLONG_FMT "I64d"
+# define NPY_ULONGLONG_FMT "I64u"
+# else
+# define NPY_LONGLONG_FMT "lld"
+# define NPY_ULONGLONG_FMT "llu"
+# endif
+# ifdef _MSC_VER
+# define NPY_LONGLONG_SUFFIX(x) (x##i64)
+# define NPY_ULONGLONG_SUFFIX(x) (x##Ui64)
+# else
+# define NPY_LONGLONG_SUFFIX(x) (x##LL)
+# define NPY_ULONGLONG_SUFFIX(x) (x##ULL)
+# endif
+#else
+typedef long npy_longlong;
+typedef unsigned long npy_ulonglong;
+# define NPY_LONGLONG_SUFFIX(x) (x##L)
+# define NPY_ULONGLONG_SUFFIX(x) (x##UL)
+#endif
+
+
+typedef unsigned char npy_bool;
+#define NPY_FALSE 0
+#define NPY_TRUE 1
+
+
+#if NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE
+ typedef double npy_longdouble;
+ #define NPY_LONGDOUBLE_FMT "g"
+#else
+ typedef long double npy_longdouble;
+ #define NPY_LONGDOUBLE_FMT "Lg"
+#endif
+
+#ifndef Py_USING_UNICODE
+#error Must use Python with unicode enabled.
+#endif
+
+
+typedef signed char npy_byte;
+typedef unsigned char npy_ubyte;
+typedef unsigned short npy_ushort;
+typedef unsigned int npy_uint;
+typedef unsigned long npy_ulong;
+
+/* These are for completeness */
+typedef char npy_char;
+typedef short npy_short;
+typedef int npy_int;
+typedef long npy_long;
+typedef float npy_float;
+typedef double npy_double;
+
+/*
+ * Hash value compatibility.
+ * As of Python 3.2 hash values are of type Py_hash_t.
+ * Previous versions use C long.
+ */
+#if PY_VERSION_HEX < 0x03020000
+typedef long npy_hash_t;
+#define NPY_SIZEOF_HASH_T NPY_SIZEOF_LONG
+#else
+typedef Py_hash_t npy_hash_t;
+#define NPY_SIZEOF_HASH_T NPY_SIZEOF_INTP
+#endif
+
+/*
+ * Disabling C99 complex usage: a lot of C code in numpy/scipy relies on being
+ * able to do .real/.imag. That code will have to be converted first.
+ */
+#if 0
+#if defined(NPY_USE_C99_COMPLEX) && defined(NPY_HAVE_COMPLEX_DOUBLE)
+typedef complex npy_cdouble;
+#else
+typedef struct { double real, imag; } npy_cdouble;
+#endif
+
+#if defined(NPY_USE_C99_COMPLEX) && defined(NPY_HAVE_COMPLEX_FLOAT)
+typedef complex float npy_cfloat;
+#else
+typedef struct { float real, imag; } npy_cfloat;
+#endif
+
+#if defined(NPY_USE_C99_COMPLEX) && defined(NPY_HAVE_COMPLEX_LONG_DOUBLE)
+typedef complex long double npy_clongdouble;
+#else
+typedef struct {npy_longdouble real, imag;} npy_clongdouble;
+#endif
+#endif
+#if NPY_SIZEOF_COMPLEX_DOUBLE != 2 * NPY_SIZEOF_DOUBLE
+#error npy_cdouble definition is not compatible with C99 complex definition ! \
+ Please contact NumPy maintainers and give detailed information about your \
+ compiler and platform
+#endif
+typedef struct { double real, imag; } npy_cdouble;
+
+#if NPY_SIZEOF_COMPLEX_FLOAT != 2 * NPY_SIZEOF_FLOAT
+#error npy_cfloat definition is not compatible with C99 complex definition ! \
+ Please contact NumPy maintainers and give detailed information about your \
+ compiler and platform
+#endif
+typedef struct { float real, imag; } npy_cfloat;
+
+#if NPY_SIZEOF_COMPLEX_LONGDOUBLE != 2 * NPY_SIZEOF_LONGDOUBLE
+#error npy_clongdouble definition is not compatible with C99 complex definition ! \
+ Please contact NumPy maintainers and give detailed information about your \
+ compiler and platform
+#endif
+typedef struct { npy_longdouble real, imag; } npy_clongdouble;
+
+/*
+ * numarray-style bit-width typedefs
+ */
+#define NPY_MAX_INT8 127
+#define NPY_MIN_INT8 -128
+#define NPY_MAX_UINT8 255
+#define NPY_MAX_INT16 32767
+#define NPY_MIN_INT16 -32768
+#define NPY_MAX_UINT16 65535
+#define NPY_MAX_INT32 2147483647
+#define NPY_MIN_INT32 (-NPY_MAX_INT32 - 1)
+#define NPY_MAX_UINT32 4294967295U
+#define NPY_MAX_INT64 NPY_LONGLONG_SUFFIX(9223372036854775807)
+#define NPY_MIN_INT64 (-NPY_MAX_INT64 - NPY_LONGLONG_SUFFIX(1))
+#define NPY_MAX_UINT64 NPY_ULONGLONG_SUFFIX(18446744073709551615)
+#define NPY_MAX_INT128 NPY_LONGLONG_SUFFIX(85070591730234615865843651857942052864)
+#define NPY_MIN_INT128 (-NPY_MAX_INT128 - NPY_LONGLONG_SUFFIX(1))
+#define NPY_MAX_UINT128 NPY_ULONGLONG_SUFFIX(170141183460469231731687303715884105728)
+#define NPY_MAX_INT256 NPY_LONGLONG_SUFFIX(57896044618658097711785492504343953926634992332820282019728792003956564819967)
+#define NPY_MIN_INT256 (-NPY_MAX_INT256 - NPY_LONGLONG_SUFFIX(1))
+#define NPY_MAX_UINT256 NPY_ULONGLONG_SUFFIX(115792089237316195423570985008687907853269984665640564039457584007913129639935)
+#define NPY_MIN_DATETIME NPY_MIN_INT64
+#define NPY_MAX_DATETIME NPY_MAX_INT64
+#define NPY_MIN_TIMEDELTA NPY_MIN_INT64
+#define NPY_MAX_TIMEDELTA NPY_MAX_INT64
+
+ /* We need to find the number of bits in each type and
+ make definitions accordingly.
+
+ C states that sizeof(char) == 1 by definition, and sizeof
+ reports sizes in units of char, not bits, so the sizeof
+ keyword alone won't help.
+
+ Python itself also uses sizeof(char) quite a bit, which by
+ definition is always 1.
+
+ Idea: use CHAR_BIT, which tells us how many bits there are
+ per character.
+ */
+
+ /* Include platform definitions -- These are in the C89/90 standard */
+#include <limits.h>
+#define NPY_MAX_BYTE SCHAR_MAX
+#define NPY_MIN_BYTE SCHAR_MIN
+#define NPY_MAX_UBYTE UCHAR_MAX
+#define NPY_MAX_SHORT SHRT_MAX
+#define NPY_MIN_SHORT SHRT_MIN
+#define NPY_MAX_USHORT USHRT_MAX
+#define NPY_MAX_INT INT_MAX
+#ifndef INT_MIN
+#define INT_MIN (-INT_MAX - 1)
+#endif
+#define NPY_MIN_INT INT_MIN
+#define NPY_MAX_UINT UINT_MAX
+#define NPY_MAX_LONG LONG_MAX
+#define NPY_MIN_LONG LONG_MIN
+#define NPY_MAX_ULONG ULONG_MAX
+
+#define NPY_BITSOF_BOOL (sizeof(npy_bool) * CHAR_BIT)
+#define NPY_BITSOF_CHAR CHAR_BIT
+#define NPY_BITSOF_BYTE (NPY_SIZEOF_BYTE * CHAR_BIT)
+#define NPY_BITSOF_SHORT (NPY_SIZEOF_SHORT * CHAR_BIT)
+#define NPY_BITSOF_INT (NPY_SIZEOF_INT * CHAR_BIT)
+#define NPY_BITSOF_LONG (NPY_SIZEOF_LONG * CHAR_BIT)
+#define NPY_BITSOF_LONGLONG (NPY_SIZEOF_LONGLONG * CHAR_BIT)
+#define NPY_BITSOF_INTP (NPY_SIZEOF_INTP * CHAR_BIT)
+#define NPY_BITSOF_HALF (NPY_SIZEOF_HALF * CHAR_BIT)
+#define NPY_BITSOF_FLOAT (NPY_SIZEOF_FLOAT * CHAR_BIT)
+#define NPY_BITSOF_DOUBLE (NPY_SIZEOF_DOUBLE * CHAR_BIT)
+#define NPY_BITSOF_LONGDOUBLE (NPY_SIZEOF_LONGDOUBLE * CHAR_BIT)
+#define NPY_BITSOF_CFLOAT (NPY_SIZEOF_CFLOAT * CHAR_BIT)
+#define NPY_BITSOF_CDOUBLE (NPY_SIZEOF_CDOUBLE * CHAR_BIT)
+#define NPY_BITSOF_CLONGDOUBLE (NPY_SIZEOF_CLONGDOUBLE * CHAR_BIT)
+#define NPY_BITSOF_DATETIME (NPY_SIZEOF_DATETIME * CHAR_BIT)
+#define NPY_BITSOF_TIMEDELTA (NPY_SIZEOF_TIMEDELTA * CHAR_BIT)
+
+#if NPY_BITSOF_LONG == 8
+#define NPY_INT8 NPY_LONG
+#define NPY_UINT8 NPY_ULONG
+ typedef long npy_int8;
+ typedef unsigned long npy_uint8;
+#define PyInt8ScalarObject PyLongScalarObject
+#define PyInt8ArrType_Type PyLongArrType_Type
+#define PyUInt8ScalarObject PyULongScalarObject
+#define PyUInt8ArrType_Type PyULongArrType_Type
+#define NPY_INT8_FMT NPY_LONG_FMT
+#define NPY_UINT8_FMT NPY_ULONG_FMT
+#elif NPY_BITSOF_LONG == 16
+#define NPY_INT16 NPY_LONG
+#define NPY_UINT16 NPY_ULONG
+ typedef long npy_int16;
+ typedef unsigned long npy_uint16;
+#define PyInt16ScalarObject PyLongScalarObject
+#define PyInt16ArrType_Type PyLongArrType_Type
+#define PyUInt16ScalarObject PyULongScalarObject
+#define PyUInt16ArrType_Type PyULongArrType_Type
+#define NPY_INT16_FMT NPY_LONG_FMT
+#define NPY_UINT16_FMT NPY_ULONG_FMT
+#elif NPY_BITSOF_LONG == 32
+#define NPY_INT32 NPY_LONG
+#define NPY_UINT32 NPY_ULONG
+ typedef long npy_int32;
+ typedef unsigned long npy_uint32;
+ typedef unsigned long npy_ucs4;
+#define PyInt32ScalarObject PyLongScalarObject
+#define PyInt32ArrType_Type PyLongArrType_Type
+#define PyUInt32ScalarObject PyULongScalarObject
+#define PyUInt32ArrType_Type PyULongArrType_Type
+#define NPY_INT32_FMT NPY_LONG_FMT
+#define NPY_UINT32_FMT NPY_ULONG_FMT
+#elif NPY_BITSOF_LONG == 64
+#define NPY_INT64 NPY_LONG
+#define NPY_UINT64 NPY_ULONG
+ typedef long npy_int64;
+ typedef unsigned long npy_uint64;
+#define PyInt64ScalarObject PyLongScalarObject
+#define PyInt64ArrType_Type PyLongArrType_Type
+#define PyUInt64ScalarObject PyULongScalarObject
+#define PyUInt64ArrType_Type PyULongArrType_Type
+#define NPY_INT64_FMT NPY_LONG_FMT
+#define NPY_UINT64_FMT NPY_ULONG_FMT
+#define MyPyLong_FromInt64 PyLong_FromLong
+#define MyPyLong_AsInt64 PyLong_AsLong
+#elif NPY_BITSOF_LONG == 128
+#define NPY_INT128 NPY_LONG
+#define NPY_UINT128 NPY_ULONG
+ typedef long npy_int128;
+ typedef unsigned long npy_uint128;
+#define PyInt128ScalarObject PyLongScalarObject
+#define PyInt128ArrType_Type PyLongArrType_Type
+#define PyUInt128ScalarObject PyULongScalarObject
+#define PyUInt128ArrType_Type PyULongArrType_Type
+#define NPY_INT128_FMT NPY_LONG_FMT
+#define NPY_UINT128_FMT NPY_ULONG_FMT
+#endif
+
+#if NPY_BITSOF_LONGLONG == 8
+# ifndef NPY_INT8
+# define NPY_INT8 NPY_LONGLONG
+# define NPY_UINT8 NPY_ULONGLONG
+ typedef npy_longlong npy_int8;
+ typedef npy_ulonglong npy_uint8;
+# define PyInt8ScalarObject PyLongLongScalarObject
+# define PyInt8ArrType_Type PyLongLongArrType_Type
+# define PyUInt8ScalarObject PyULongLongScalarObject
+# define PyUInt8ArrType_Type PyULongLongArrType_Type
+#define NPY_INT8_FMT NPY_LONGLONG_FMT
+#define NPY_UINT8_FMT NPY_ULONGLONG_FMT
+# endif
+# define NPY_MAX_LONGLONG NPY_MAX_INT8
+# define NPY_MIN_LONGLONG NPY_MIN_INT8
+# define NPY_MAX_ULONGLONG NPY_MAX_UINT8
+#elif NPY_BITSOF_LONGLONG == 16
+# ifndef NPY_INT16
+# define NPY_INT16 NPY_LONGLONG
+# define NPY_UINT16 NPY_ULONGLONG
+ typedef npy_longlong npy_int16;
+ typedef npy_ulonglong npy_uint16;
+# define PyInt16ScalarObject PyLongLongScalarObject
+# define PyInt16ArrType_Type PyLongLongArrType_Type
+# define PyUInt16ScalarObject PyULongLongScalarObject
+# define PyUInt16ArrType_Type PyULongLongArrType_Type
+#define NPY_INT16_FMT NPY_LONGLONG_FMT
+#define NPY_UINT16_FMT NPY_ULONGLONG_FMT
+# endif
+# define NPY_MAX_LONGLONG NPY_MAX_INT16
+# define NPY_MIN_LONGLONG NPY_MIN_INT16
+# define NPY_MAX_ULONGLONG NPY_MAX_UINT16
+#elif NPY_BITSOF_LONGLONG == 32
+# ifndef NPY_INT32
+# define NPY_INT32 NPY_LONGLONG
+# define NPY_UINT32 NPY_ULONGLONG
+ typedef npy_longlong npy_int32;
+ typedef npy_ulonglong npy_uint32;
+ typedef npy_ulonglong npy_ucs4;
+# define PyInt32ScalarObject PyLongLongScalarObject
+# define PyInt32ArrType_Type PyLongLongArrType_Type
+# define PyUInt32ScalarObject PyULongLongScalarObject
+# define PyUInt32ArrType_Type PyULongLongArrType_Type
+#define NPY_INT32_FMT NPY_LONGLONG_FMT
+#define NPY_UINT32_FMT NPY_ULONGLONG_FMT
+# endif
+# define NPY_MAX_LONGLONG NPY_MAX_INT32
+# define NPY_MIN_LONGLONG NPY_MIN_INT32
+# define NPY_MAX_ULONGLONG NPY_MAX_UINT32
+#elif NPY_BITSOF_LONGLONG == 64
+# ifndef NPY_INT64
+# define NPY_INT64 NPY_LONGLONG
+# define NPY_UINT64 NPY_ULONGLONG
+ typedef npy_longlong npy_int64;
+ typedef npy_ulonglong npy_uint64;
+# define PyInt64ScalarObject PyLongLongScalarObject
+# define PyInt64ArrType_Type PyLongLongArrType_Type
+# define PyUInt64ScalarObject PyULongLongScalarObject
+# define PyUInt64ArrType_Type PyULongLongArrType_Type
+#define NPY_INT64_FMT NPY_LONGLONG_FMT
+#define NPY_UINT64_FMT NPY_ULONGLONG_FMT
+# define MyPyLong_FromInt64 PyLong_FromLongLong
+# define MyPyLong_AsInt64 PyLong_AsLongLong
+# endif
+# define NPY_MAX_LONGLONG NPY_MAX_INT64
+# define NPY_MIN_LONGLONG NPY_MIN_INT64
+# define NPY_MAX_ULONGLONG NPY_MAX_UINT64
+#elif NPY_BITSOF_LONGLONG == 128
+# ifndef NPY_INT128
+# define NPY_INT128 NPY_LONGLONG
+# define NPY_UINT128 NPY_ULONGLONG
+ typedef npy_longlong npy_int128;
+ typedef npy_ulonglong npy_uint128;
+# define PyInt128ScalarObject PyLongLongScalarObject
+# define PyInt128ArrType_Type PyLongLongArrType_Type
+# define PyUInt128ScalarObject PyULongLongScalarObject
+# define PyUInt128ArrType_Type PyULongLongArrType_Type
+#define NPY_INT128_FMT NPY_LONGLONG_FMT
+#define NPY_UINT128_FMT NPY_ULONGLONG_FMT
+# endif
+# define NPY_MAX_LONGLONG NPY_MAX_INT128
+# define NPY_MIN_LONGLONG NPY_MIN_INT128
+# define NPY_MAX_ULONGLONG NPY_MAX_UINT128
+#elif NPY_BITSOF_LONGLONG == 256
+# define NPY_INT256 NPY_LONGLONG
+# define NPY_UINT256 NPY_ULONGLONG
+ typedef npy_longlong npy_int256;
+ typedef npy_ulonglong npy_uint256;
+# define PyInt256ScalarObject PyLongLongScalarObject
+# define PyInt256ArrType_Type PyLongLongArrType_Type
+# define PyUInt256ScalarObject PyULongLongScalarObject
+# define PyUInt256ArrType_Type PyULongLongArrType_Type
+#define NPY_INT256_FMT NPY_LONGLONG_FMT
+#define NPY_UINT256_FMT NPY_ULONGLONG_FMT
+# define NPY_MAX_LONGLONG NPY_MAX_INT256
+# define NPY_MIN_LONGLONG NPY_MIN_INT256
+# define NPY_MAX_ULONGLONG NPY_MAX_UINT256
+#endif
+
+#if NPY_BITSOF_INT == 8
+#ifndef NPY_INT8
+#define NPY_INT8 NPY_INT
+#define NPY_UINT8 NPY_UINT
+ typedef int npy_int8;
+ typedef unsigned int npy_uint8;
+# define PyInt8ScalarObject PyIntScalarObject
+# define PyInt8ArrType_Type PyIntArrType_Type
+# define PyUInt8ScalarObject PyUIntScalarObject
+# define PyUInt8ArrType_Type PyUIntArrType_Type
+#define NPY_INT8_FMT NPY_INT_FMT
+#define NPY_UINT8_FMT NPY_UINT_FMT
+#endif
+#elif NPY_BITSOF_INT == 16
+#ifndef NPY_INT16
+#define NPY_INT16 NPY_INT
+#define NPY_UINT16 NPY_UINT
+ typedef int npy_int16;
+ typedef unsigned int npy_uint16;
+# define PyInt16ScalarObject PyIntScalarObject
+# define PyInt16ArrType_Type PyIntArrType_Type
+# define PyUInt16ScalarObject PyUIntScalarObject
+# define PyUInt16ArrType_Type PyUIntArrType_Type
+#define NPY_INT16_FMT NPY_INT_FMT
+#define NPY_UINT16_FMT NPY_UINT_FMT
+#endif
+#elif NPY_BITSOF_INT == 32
+#ifndef NPY_INT32
+#define NPY_INT32 NPY_INT
+#define NPY_UINT32 NPY_UINT
+ typedef int npy_int32;
+ typedef unsigned int npy_uint32;
+ typedef unsigned int npy_ucs4;
+# define PyInt32ScalarObject PyIntScalarObject
+# define PyInt32ArrType_Type PyIntArrType_Type
+# define PyUInt32ScalarObject PyUIntScalarObject
+# define PyUInt32ArrType_Type PyUIntArrType_Type
+#define NPY_INT32_FMT NPY_INT_FMT
+#define NPY_UINT32_FMT NPY_UINT_FMT
+#endif
+#elif NPY_BITSOF_INT == 64
+#ifndef NPY_INT64
+#define NPY_INT64 NPY_INT
+#define NPY_UINT64 NPY_UINT
+ typedef int npy_int64;
+ typedef unsigned int npy_uint64;
+# define PyInt64ScalarObject PyIntScalarObject
+# define PyInt64ArrType_Type PyIntArrType_Type
+# define PyUInt64ScalarObject PyUIntScalarObject
+# define PyUInt64ArrType_Type PyUIntArrType_Type
+#define NPY_INT64_FMT NPY_INT_FMT
+#define NPY_UINT64_FMT NPY_UINT_FMT
+# define MyPyLong_FromInt64 PyLong_FromLong
+# define MyPyLong_AsInt64 PyLong_AsLong
+#endif
+#elif NPY_BITSOF_INT == 128
+#ifndef NPY_INT128
+#define NPY_INT128 NPY_INT
+#define NPY_UINT128 NPY_UINT
+ typedef int npy_int128;
+ typedef unsigned int npy_uint128;
+# define PyInt128ScalarObject PyIntScalarObject
+# define PyInt128ArrType_Type PyIntArrType_Type
+# define PyUInt128ScalarObject PyUIntScalarObject
+# define PyUInt128ArrType_Type PyUIntArrType_Type
+#define NPY_INT128_FMT NPY_INT_FMT
+#define NPY_UINT128_FMT NPY_UINT_FMT
+#endif
+#endif
+
+#if NPY_BITSOF_SHORT == 8
+#ifndef NPY_INT8
+#define NPY_INT8 NPY_SHORT
+#define NPY_UINT8 NPY_USHORT
+ typedef short npy_int8;
+ typedef unsigned short npy_uint8;
+# define PyInt8ScalarObject PyShortScalarObject
+# define PyInt8ArrType_Type PyShortArrType_Type
+# define PyUInt8ScalarObject PyUShortScalarObject
+# define PyUInt8ArrType_Type PyUShortArrType_Type
+#define NPY_INT8_FMT NPY_SHORT_FMT
+#define NPY_UINT8_FMT NPY_USHORT_FMT
+#endif
+#elif NPY_BITSOF_SHORT == 16
+#ifndef NPY_INT16
+#define NPY_INT16 NPY_SHORT
+#define NPY_UINT16 NPY_USHORT
+ typedef short npy_int16;
+ typedef unsigned short npy_uint16;
+# define PyInt16ScalarObject PyShortScalarObject
+# define PyInt16ArrType_Type PyShortArrType_Type
+# define PyUInt16ScalarObject PyUShortScalarObject
+# define PyUInt16ArrType_Type PyUShortArrType_Type
+#define NPY_INT16_FMT NPY_SHORT_FMT
+#define NPY_UINT16_FMT NPY_USHORT_FMT
+#endif
+#elif NPY_BITSOF_SHORT == 32
+#ifndef NPY_INT32
+#define NPY_INT32 NPY_SHORT
+#define NPY_UINT32 NPY_USHORT
+ typedef short npy_int32;
+ typedef unsigned short npy_uint32;
+ typedef unsigned short npy_ucs4;
+# define PyInt32ScalarObject PyShortScalarObject
+# define PyInt32ArrType_Type PyShortArrType_Type
+# define PyUInt32ScalarObject PyUShortScalarObject
+# define PyUInt32ArrType_Type PyUShortArrType_Type
+#define NPY_INT32_FMT NPY_SHORT_FMT
+#define NPY_UINT32_FMT NPY_USHORT_FMT
+#endif
+#elif NPY_BITSOF_SHORT == 64
+#ifndef NPY_INT64
+#define NPY_INT64 NPY_SHORT
+#define NPY_UINT64 NPY_USHORT
+ typedef short npy_int64;
+ typedef unsigned short npy_uint64;
+# define PyInt64ScalarObject PyShortScalarObject
+# define PyInt64ArrType_Type PyShortArrType_Type
+# define PyUInt64ScalarObject PyUShortScalarObject
+# define PyUInt64ArrType_Type PyUShortArrType_Type
+#define NPY_INT64_FMT NPY_SHORT_FMT
+#define NPY_UINT64_FMT NPY_USHORT_FMT
+# define MyPyLong_FromInt64 PyLong_FromLong
+# define MyPyLong_AsInt64 PyLong_AsLong
+#endif
+#elif NPY_BITSOF_SHORT == 128
+#ifndef NPY_INT128
+#define NPY_INT128 NPY_SHORT
+#define NPY_UINT128 NPY_USHORT
+ typedef short npy_int128;
+ typedef unsigned short npy_uint128;
+# define PyInt128ScalarObject PyShortScalarObject
+# define PyInt128ArrType_Type PyShortArrType_Type
+# define PyUInt128ScalarObject PyUShortScalarObject
+# define PyUInt128ArrType_Type PyUShortArrType_Type
+#define NPY_INT128_FMT NPY_SHORT_FMT
+#define NPY_UINT128_FMT NPY_USHORT_FMT
+#endif
+#endif
+
+
+#if NPY_BITSOF_CHAR == 8
+#ifndef NPY_INT8
+#define NPY_INT8 NPY_BYTE
+#define NPY_UINT8 NPY_UBYTE
+ typedef signed char npy_int8;
+ typedef unsigned char npy_uint8;
+# define PyInt8ScalarObject PyByteScalarObject
+# define PyInt8ArrType_Type PyByteArrType_Type
+# define PyUInt8ScalarObject PyUByteScalarObject
+# define PyUInt8ArrType_Type PyUByteArrType_Type
+#define NPY_INT8_FMT NPY_BYTE_FMT
+#define NPY_UINT8_FMT NPY_UBYTE_FMT
+#endif
+#elif NPY_BITSOF_CHAR == 16
+#ifndef NPY_INT16
+#define NPY_INT16 NPY_BYTE
+#define NPY_UINT16 NPY_UBYTE
+ typedef signed char npy_int16;
+ typedef unsigned char npy_uint16;
+# define PyInt16ScalarObject PyByteScalarObject
+# define PyInt16ArrType_Type PyByteArrType_Type
+# define PyUInt16ScalarObject PyUByteScalarObject
+# define PyUInt16ArrType_Type PyUByteArrType_Type
+#define NPY_INT16_FMT NPY_BYTE_FMT
+#define NPY_UINT16_FMT NPY_UBYTE_FMT
+#endif
+#elif NPY_BITSOF_CHAR == 32
+#ifndef NPY_INT32
+#define NPY_INT32 NPY_BYTE
+#define NPY_UINT32 NPY_UBYTE
+ typedef signed char npy_int32;
+ typedef unsigned char npy_uint32;
+ typedef unsigned char npy_ucs4;
+# define PyInt32ScalarObject PyByteScalarObject
+# define PyInt32ArrType_Type PyByteArrType_Type
+# define PyUInt32ScalarObject PyUByteScalarObject
+# define PyUInt32ArrType_Type PyUByteArrType_Type
+#define NPY_INT32_FMT NPY_BYTE_FMT
+#define NPY_UINT32_FMT NPY_UBYTE_FMT
+#endif
+#elif NPY_BITSOF_CHAR == 64
+#ifndef NPY_INT64
+#define NPY_INT64 NPY_BYTE
+#define NPY_UINT64 NPY_UBYTE
+ typedef signed char npy_int64;
+ typedef unsigned char npy_uint64;
+# define PyInt64ScalarObject PyByteScalarObject
+# define PyInt64ArrType_Type PyByteArrType_Type
+# define PyUInt64ScalarObject PyUByteScalarObject
+# define PyUInt64ArrType_Type PyUByteArrType_Type
+#define NPY_INT64_FMT NPY_BYTE_FMT
+#define NPY_UINT64_FMT NPY_UBYTE_FMT
+# define MyPyLong_FromInt64 PyLong_FromLong
+# define MyPyLong_AsInt64 PyLong_AsLong
+#endif
+#elif NPY_BITSOF_CHAR == 128
+#ifndef NPY_INT128
+#define NPY_INT128 NPY_BYTE
+#define NPY_UINT128 NPY_UBYTE
+ typedef signed char npy_int128;
+ typedef unsigned char npy_uint128;
+# define PyInt128ScalarObject PyByteScalarObject
+# define PyInt128ArrType_Type PyByteArrType_Type
+# define PyUInt128ScalarObject PyUByteScalarObject
+# define PyUInt128ArrType_Type PyUByteArrType_Type
+#define NPY_INT128_FMT NPY_BYTE_FMT
+#define NPY_UINT128_FMT NPY_UBYTE_FMT
+#endif
+#endif
+
+
+
+#if NPY_BITSOF_DOUBLE == 32
+#ifndef NPY_FLOAT32
+#define NPY_FLOAT32 NPY_DOUBLE
+#define NPY_COMPLEX64 NPY_CDOUBLE
+ typedef double npy_float32;
+ typedef npy_cdouble npy_complex64;
+# define PyFloat32ScalarObject PyDoubleScalarObject
+# define PyComplex64ScalarObject PyCDoubleScalarObject
+# define PyFloat32ArrType_Type PyDoubleArrType_Type
+# define PyComplex64ArrType_Type PyCDoubleArrType_Type
+#define NPY_FLOAT32_FMT NPY_DOUBLE_FMT
+#define NPY_COMPLEX64_FMT NPY_CDOUBLE_FMT
+#endif
+#elif NPY_BITSOF_DOUBLE == 64
+#ifndef NPY_FLOAT64
+#define NPY_FLOAT64 NPY_DOUBLE
+#define NPY_COMPLEX128 NPY_CDOUBLE
+ typedef double npy_float64;
+ typedef npy_cdouble npy_complex128;
+# define PyFloat64ScalarObject PyDoubleScalarObject
+# define PyComplex128ScalarObject PyCDoubleScalarObject
+# define PyFloat64ArrType_Type PyDoubleArrType_Type
+# define PyComplex128ArrType_Type PyCDoubleArrType_Type
+#define NPY_FLOAT64_FMT NPY_DOUBLE_FMT
+#define NPY_COMPLEX128_FMT NPY_CDOUBLE_FMT
+#endif
+#elif NPY_BITSOF_DOUBLE == 80
+#ifndef NPY_FLOAT80
+#define NPY_FLOAT80 NPY_DOUBLE
+#define NPY_COMPLEX160 NPY_CDOUBLE
+ typedef double npy_float80;
+ typedef npy_cdouble npy_complex160;
+# define PyFloat80ScalarObject PyDoubleScalarObject
+# define PyComplex160ScalarObject PyCDoubleScalarObject
+# define PyFloat80ArrType_Type PyDoubleArrType_Type
+# define PyComplex160ArrType_Type PyCDoubleArrType_Type
+#define NPY_FLOAT80_FMT NPY_DOUBLE_FMT
+#define NPY_COMPLEX160_FMT NPY_CDOUBLE_FMT
+#endif
+#elif NPY_BITSOF_DOUBLE == 96
+#ifndef NPY_FLOAT96
+#define NPY_FLOAT96 NPY_DOUBLE
+#define NPY_COMPLEX192 NPY_CDOUBLE
+ typedef double npy_float96;
+ typedef npy_cdouble npy_complex192;
+# define PyFloat96ScalarObject PyDoubleScalarObject
+# define PyComplex192ScalarObject PyCDoubleScalarObject
+# define PyFloat96ArrType_Type PyDoubleArrType_Type
+# define PyComplex192ArrType_Type PyCDoubleArrType_Type
+#define NPY_FLOAT96_FMT NPY_DOUBLE_FMT
+#define NPY_COMPLEX192_FMT NPY_CDOUBLE_FMT
+#endif
+#elif NPY_BITSOF_DOUBLE == 128
+#ifndef NPY_FLOAT128
+#define NPY_FLOAT128 NPY_DOUBLE
+#define NPY_COMPLEX256 NPY_CDOUBLE
+ typedef double npy_float128;
+ typedef npy_cdouble npy_complex256;
+# define PyFloat128ScalarObject PyDoubleScalarObject
+# define PyComplex256ScalarObject PyCDoubleScalarObject
+# define PyFloat128ArrType_Type PyDoubleArrType_Type
+# define PyComplex256ArrType_Type PyCDoubleArrType_Type
+#define NPY_FLOAT128_FMT NPY_DOUBLE_FMT
+#define NPY_COMPLEX256_FMT NPY_CDOUBLE_FMT
+#endif
+#endif
+
+
+
+#if NPY_BITSOF_FLOAT == 32
+#ifndef NPY_FLOAT32
+#define NPY_FLOAT32 NPY_FLOAT
+#define NPY_COMPLEX64 NPY_CFLOAT
+ typedef float npy_float32;
+ typedef npy_cfloat npy_complex64;
+# define PyFloat32ScalarObject PyFloatScalarObject
+# define PyComplex64ScalarObject PyCFloatScalarObject
+# define PyFloat32ArrType_Type PyFloatArrType_Type
+# define PyComplex64ArrType_Type PyCFloatArrType_Type
+#define NPY_FLOAT32_FMT NPY_FLOAT_FMT
+#define NPY_COMPLEX64_FMT NPY_CFLOAT_FMT
+#endif
+#elif NPY_BITSOF_FLOAT == 64
+#ifndef NPY_FLOAT64
+#define NPY_FLOAT64 NPY_FLOAT
+#define NPY_COMPLEX128 NPY_CFLOAT
+ typedef float npy_float64;
+ typedef npy_cfloat npy_complex128;
+# define PyFloat64ScalarObject PyFloatScalarObject
+# define PyComplex128ScalarObject PyCFloatScalarObject
+# define PyFloat64ArrType_Type PyFloatArrType_Type
+# define PyComplex128ArrType_Type PyCFloatArrType_Type
+#define NPY_FLOAT64_FMT NPY_FLOAT_FMT
+#define NPY_COMPLEX128_FMT NPY_CFLOAT_FMT
+#endif
+#elif NPY_BITSOF_FLOAT == 80
+#ifndef NPY_FLOAT80
+#define NPY_FLOAT80 NPY_FLOAT
+#define NPY_COMPLEX160 NPY_CFLOAT
+ typedef float npy_float80;
+ typedef npy_cfloat npy_complex160;
+# define PyFloat80ScalarObject PyFloatScalarObject
+# define PyComplex160ScalarObject PyCFloatScalarObject
+# define PyFloat80ArrType_Type PyFloatArrType_Type
+# define PyComplex160ArrType_Type PyCFloatArrType_Type
+#define NPY_FLOAT80_FMT NPY_FLOAT_FMT
+#define NPY_COMPLEX160_FMT NPY_CFLOAT_FMT
+#endif
+#elif NPY_BITSOF_FLOAT == 96
+#ifndef NPY_FLOAT96
+#define NPY_FLOAT96 NPY_FLOAT
+#define NPY_COMPLEX192 NPY_CFLOAT
+ typedef float npy_float96;
+ typedef npy_cfloat npy_complex192;
+# define PyFloat96ScalarObject PyFloatScalarObject
+# define PyComplex192ScalarObject PyCFloatScalarObject
+# define PyFloat96ArrType_Type PyFloatArrType_Type
+# define PyComplex192ArrType_Type PyCFloatArrType_Type
+#define NPY_FLOAT96_FMT NPY_FLOAT_FMT
+#define NPY_COMPLEX192_FMT NPY_CFLOAT_FMT
+#endif
+#elif NPY_BITSOF_FLOAT == 128
+#ifndef NPY_FLOAT128
+#define NPY_FLOAT128 NPY_FLOAT
+#define NPY_COMPLEX256 NPY_CFLOAT
+ typedef float npy_float128;
+ typedef npy_cfloat npy_complex256;
+# define PyFloat128ScalarObject PyFloatScalarObject
+# define PyComplex256ScalarObject PyCFloatScalarObject
+# define PyFloat128ArrType_Type PyFloatArrType_Type
+# define PyComplex256ArrType_Type PyCFloatArrType_Type
+#define NPY_FLOAT128_FMT NPY_FLOAT_FMT
+#define NPY_COMPLEX256_FMT NPY_CFLOAT_FMT
+#endif
+#endif
+
+/* half/float16 isn't a floating-point type in C */
+#define NPY_FLOAT16 NPY_HALF
+typedef npy_uint16 npy_half;
+typedef npy_half npy_float16;
+
+#if NPY_BITSOF_LONGDOUBLE == 32
+#ifndef NPY_FLOAT32
+#define NPY_FLOAT32 NPY_LONGDOUBLE
+#define NPY_COMPLEX64 NPY_CLONGDOUBLE
+ typedef npy_longdouble npy_float32;
+ typedef npy_clongdouble npy_complex64;
+# define PyFloat32ScalarObject PyLongDoubleScalarObject
+# define PyComplex64ScalarObject PyCLongDoubleScalarObject
+# define PyFloat32ArrType_Type PyLongDoubleArrType_Type
+# define PyComplex64ArrType_Type PyCLongDoubleArrType_Type
+#define NPY_FLOAT32_FMT NPY_LONGDOUBLE_FMT
+#define NPY_COMPLEX64_FMT NPY_CLONGDOUBLE_FMT
+#endif
+#elif NPY_BITSOF_LONGDOUBLE == 64
+#ifndef NPY_FLOAT64
+#define NPY_FLOAT64 NPY_LONGDOUBLE
+#define NPY_COMPLEX128 NPY_CLONGDOUBLE
+ typedef npy_longdouble npy_float64;
+ typedef npy_clongdouble npy_complex128;
+# define PyFloat64ScalarObject PyLongDoubleScalarObject
+# define PyComplex128ScalarObject PyCLongDoubleScalarObject
+# define PyFloat64ArrType_Type PyLongDoubleArrType_Type
+# define PyComplex128ArrType_Type PyCLongDoubleArrType_Type
+#define NPY_FLOAT64_FMT NPY_LONGDOUBLE_FMT
+#define NPY_COMPLEX128_FMT NPY_CLONGDOUBLE_FMT
+#endif
+#elif NPY_BITSOF_LONGDOUBLE == 80
+#ifndef NPY_FLOAT80
+#define NPY_FLOAT80 NPY_LONGDOUBLE
+#define NPY_COMPLEX160 NPY_CLONGDOUBLE
+ typedef npy_longdouble npy_float80;
+ typedef npy_clongdouble npy_complex160;
+# define PyFloat80ScalarObject PyLongDoubleScalarObject
+# define PyComplex160ScalarObject PyCLongDoubleScalarObject
+# define PyFloat80ArrType_Type PyLongDoubleArrType_Type
+# define PyComplex160ArrType_Type PyCLongDoubleArrType_Type
+#define NPY_FLOAT80_FMT NPY_LONGDOUBLE_FMT
+#define NPY_COMPLEX160_FMT NPY_CLONGDOUBLE_FMT
+#endif
+#elif NPY_BITSOF_LONGDOUBLE == 96
+#ifndef NPY_FLOAT96
+#define NPY_FLOAT96 NPY_LONGDOUBLE
+#define NPY_COMPLEX192 NPY_CLONGDOUBLE
+ typedef npy_longdouble npy_float96;
+ typedef npy_clongdouble npy_complex192;
+# define PyFloat96ScalarObject PyLongDoubleScalarObject
+# define PyComplex192ScalarObject PyCLongDoubleScalarObject
+# define PyFloat96ArrType_Type PyLongDoubleArrType_Type
+# define PyComplex192ArrType_Type PyCLongDoubleArrType_Type
+#define NPY_FLOAT96_FMT NPY_LONGDOUBLE_FMT
+#define NPY_COMPLEX192_FMT NPY_CLONGDOUBLE_FMT
+#endif
+#elif NPY_BITSOF_LONGDOUBLE == 128
+#ifndef NPY_FLOAT128
+#define NPY_FLOAT128 NPY_LONGDOUBLE
+#define NPY_COMPLEX256 NPY_CLONGDOUBLE
+ typedef npy_longdouble npy_float128;
+ typedef npy_clongdouble npy_complex256;
+# define PyFloat128ScalarObject PyLongDoubleScalarObject
+# define PyComplex256ScalarObject PyCLongDoubleScalarObject
+# define PyFloat128ArrType_Type PyLongDoubleArrType_Type
+# define PyComplex256ArrType_Type PyCLongDoubleArrType_Type
+#define NPY_FLOAT128_FMT NPY_LONGDOUBLE_FMT
+#define NPY_COMPLEX256_FMT NPY_CLONGDOUBLE_FMT
+#endif
+#elif NPY_BITSOF_LONGDOUBLE == 256
+#define NPY_FLOAT256 NPY_LONGDOUBLE
+#define NPY_COMPLEX512 NPY_CLONGDOUBLE
+ typedef npy_longdouble npy_float256;
+ typedef npy_clongdouble npy_complex512;
+# define PyFloat256ScalarObject PyLongDoubleScalarObject
+# define PyComplex512ScalarObject PyCLongDoubleScalarObject
+# define PyFloat256ArrType_Type PyLongDoubleArrType_Type
+# define PyComplex512ArrType_Type PyCLongDoubleArrType_Type
+#define NPY_FLOAT256_FMT NPY_LONGDOUBLE_FMT
+#define NPY_COMPLEX512_FMT NPY_CLONGDOUBLE_FMT
+#endif
+
+/* datetime typedefs */
+typedef npy_int64 npy_timedelta;
+typedef npy_int64 npy_datetime;
+#define NPY_DATETIME_FMT NPY_INT64_FMT
+#define NPY_TIMEDELTA_FMT NPY_INT64_FMT
+
+/* End of typedefs for numarray style bit-width names */
+
+#endif
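
A quick note on what these typedefs buy a C extension: npy_intp always matches the pointer size, NPY_INTP_FMT gives the matching PyErr_Format code, and npy_fseek/npy_ftell stay 64-bit safe even on win-amd64. A minimal sketch against these headers; report_file_state() is a hypothetical helper, not part of NumPy, and it assumes a CPython build where PyErr_Format understands %lld:

    #include <stdio.h>
    #include <Python.h>
    #include <numpy/npy_common.h>

    /* Hypothetical helper: set a Python error that reports an element
       count and a file size using the portable macros defined above. */
    static int report_file_state(FILE *fp, npy_intp nitems)
    {
        npy_off_t end;

        if (npy_fseek(fp, 0, SEEK_END) != 0) {  /* 64-bit safe seek */
            return -1;
        }
        end = npy_ftell(fp);                    /* 64-bit safe tell */
        if (end < 0) {
            return -1;
        }
        /* NPY_INTP_FMT is meant for PyErr_Format, per the note above */
        PyErr_Format(PyExc_ValueError,
                     "%" NPY_INTP_FMT " items, file ends at byte %lld",
                     nitems, (long long)end);
        return -1;
    }
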
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_cpu.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_cpu.h
new file mode 100644
index 0000000..5edd8f4
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_cpu.h
@@ -0,0 +1,118 @@
+/*
+ * This sets the (target) CPU specific macros:
+ * - Possible values:
+ * NPY_CPU_X86
+ * NPY_CPU_AMD64
+ * NPY_CPU_PPC
+ * NPY_CPU_PPC64
+ * NPY_CPU_PPC64LE
+ * NPY_CPU_SPARC
+ * NPY_CPU_S390
+ * NPY_CPU_IA64
+ * NPY_CPU_HPPA
+ * NPY_CPU_ALPHA
+ * NPY_CPU_ARMEL
+ * NPY_CPU_ARMEB
+ * NPY_CPU_SH_LE
+ * NPY_CPU_SH_BE
+ * NPY_CPU_ARCEL
+ * NPY_CPU_ARCEB
+ * NPY_CPU_RISCV64
+ */
+#ifndef _NPY_CPUARCH_H_
+#define _NPY_CPUARCH_H_
+
+#include "numpyconfig.h"
+#include <string.h> /* for memcpy */
+
+#if defined( __i386__ ) || defined(i386) || defined(_M_IX86)
+ /*
+ * __i386__ is defined by gcc and Intel compiler on Linux,
+ * _M_IX86 by VS compiler,
+ * i386 by Sun compilers on opensolaris at least
+ */
+ #define NPY_CPU_X86
+#elif defined(__x86_64__) || defined(__amd64__) || defined(__x86_64) || defined(_M_AMD64)
+ /*
+ * both __x86_64__ and __amd64__ are defined by gcc
+ * __x86_64 defined by sun compiler on opensolaris at least
+ * _M_AMD64 defined by MS compiler
+ */
+ #define NPY_CPU_AMD64
+#elif defined(__powerpc64__) && defined(__LITTLE_ENDIAN__)
+ #define NPY_CPU_PPC64LE
+#elif defined(__powerpc64__) && defined(__BIG_ENDIAN__)
+ #define NPY_CPU_PPC64
+#elif defined(__ppc__) || defined(__powerpc__) || defined(_ARCH_PPC)
+ /*
+ * __ppc__ is defined by gcc, I remember having seen __powerpc__ once,
+ * but can't find it at the moment
+ * _ARCH_PPC is used by at least gcc on AIX
+ * As __powerpc__ and _ARCH_PPC are also defined by PPC64 check
+ * for those specifically first before defaulting to ppc
+ */
+ #define NPY_CPU_PPC
+#elif defined(__sparc__) || defined(__sparc)
+ /* __sparc__ is defined by gcc and Forte (e.g. Sun) compilers */
+ #define NPY_CPU_SPARC
+#elif defined(__s390__)
+ #define NPY_CPU_S390
+#elif defined(__ia64)
+ #define NPY_CPU_IA64
+#elif defined(__hppa)
+ #define NPY_CPU_HPPA
+#elif defined(__alpha__)
+ #define NPY_CPU_ALPHA
+#elif defined(__arm__) || defined(__aarch64__)
+ #if defined(__ARMEB__) || defined(__AARCH64EB__)
+ #if defined(__ARM_32BIT_STATE)
+ #define NPY_CPU_ARMEB_AARCH32
+ #elif defined(__ARM_64BIT_STATE)
+ #define NPY_CPU_ARMEB_AARCH64
+ #else
+ #define NPY_CPU_ARMEB
+ #endif
+ #elif defined(__ARMEL__) || defined(__AARCH64EL__)
+ #if defined(__ARM_32BIT_STATE)
+ #define NPY_CPU_ARMEL_AARCH32
+ #elif defined(__ARM_64BIT_STATE)
+ #define NPY_CPU_ARMEL_AARCH64
+ #else
+ #define NPY_CPU_ARMEL
+ #endif
+ #else
+ # error Unknown ARM CPU, please report this to numpy maintainers with \
+ information about your platform (OS, CPU and compiler)
+ #endif
+#elif defined(__sh__) && defined(__LITTLE_ENDIAN__)
+ #define NPY_CPU_SH_LE
+#elif defined(__sh__) && defined(__BIG_ENDIAN__)
+ #define NPY_CPU_SH_BE
+#elif defined(__MIPSEL__)
+ #define NPY_CPU_MIPSEL
+#elif defined(__MIPSEB__)
+ #define NPY_CPU_MIPSEB
+#elif defined(__or1k__)
+ #define NPY_CPU_OR1K
+#elif defined(__mc68000__)
+ #define NPY_CPU_M68K
+#elif defined(__arc__) && defined(__LITTLE_ENDIAN__)
+ #define NPY_CPU_ARCEL
+#elif defined(__arc__) && defined(__BIG_ENDIAN__)
+ #define NPY_CPU_ARCEB
+#elif defined(__riscv) && defined(__riscv_xlen) && __riscv_xlen == 64
+ #define NPY_CPU_RISCV64
+#else
+ #error Unknown CPU, please report this to numpy maintainers with \
+ information about your platform (OS, CPU and compiler)
+#endif
+
+#define NPY_COPY_PYOBJECT_PTR(dst, src) memcpy(dst, src, sizeof(PyObject *))
+
+#if (defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64))
+#define NPY_CPU_HAVE_UNALIGNED_ACCESS 1
+#else
+#define NPY_CPU_HAVE_UNALIGNED_ACCESS 0
+#endif
+
+#endif
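
The NPY_CPU_HAVE_UNALIGNED_ACCESS flag at the end of this header is the usual way to choose between a direct load and a byte-wise copy on strict-alignment targets. A minimal sketch, assuming these headers are on the include path; load_u32() is a hypothetical helper:

    #include <string.h>
    #include <numpy/npy_common.h>
    #include <numpy/npy_cpu.h>

    /* Read a 32-bit value from a possibly unaligned buffer position. */
    static npy_uint32 load_u32(const char *p)
    {
    #if NPY_CPU_HAVE_UNALIGNED_ACCESS
        /* x86/amd64 tolerate unaligned loads */
        return *(const npy_uint32 *)p;
    #else
        /* strict-alignment CPUs (e.g. sparc): copy byte by byte */
        npy_uint32 v;
        memcpy(&v, p, sizeof(v));
        return v;
    #endif
    }
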
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_endian.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_endian.h
new file mode 100644
index 0000000..44cdffd
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_endian.h
@@ -0,0 +1,72 @@
+#ifndef _NPY_ENDIAN_H_
+#define _NPY_ENDIAN_H_
+
+/*
+ * NPY_BYTE_ORDER is set to the same value as BYTE_ORDER set by glibc in
+ * endian.h
+ */
+
+#if defined(NPY_HAVE_ENDIAN_H) || defined(NPY_HAVE_SYS_ENDIAN_H)
+ /* Use endian.h if available */
+
+ #if defined(NPY_HAVE_ENDIAN_H)
+ #include <endian.h>
+ #elif defined(NPY_HAVE_SYS_ENDIAN_H)
+ #include <sys/endian.h>
+ #endif
+
+ #if defined(BYTE_ORDER) && defined(BIG_ENDIAN) && defined(LITTLE_ENDIAN)
+ #define NPY_BYTE_ORDER BYTE_ORDER
+ #define NPY_LITTLE_ENDIAN LITTLE_ENDIAN
+ #define NPY_BIG_ENDIAN BIG_ENDIAN
+ #elif defined(_BYTE_ORDER) && defined(_BIG_ENDIAN) && defined(_LITTLE_ENDIAN)
+ #define NPY_BYTE_ORDER _BYTE_ORDER
+ #define NPY_LITTLE_ENDIAN _LITTLE_ENDIAN
+ #define NPY_BIG_ENDIAN _BIG_ENDIAN
+ #elif defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && defined(__LITTLE_ENDIAN)
+ #define NPY_BYTE_ORDER __BYTE_ORDER
+ #define NPY_LITTLE_ENDIAN __LITTLE_ENDIAN
+ #define NPY_BIG_ENDIAN __BIG_ENDIAN
+ #endif
+#endif
+
+#ifndef NPY_BYTE_ORDER
+ /* Set endianness info using target CPU */
+ #include "npy_cpu.h"
+
+ #define NPY_LITTLE_ENDIAN 1234
+ #define NPY_BIG_ENDIAN 4321
+
+ #if defined(NPY_CPU_X86) \
+ || defined(NPY_CPU_AMD64) \
+ || defined(NPY_CPU_IA64) \
+ || defined(NPY_CPU_ALPHA) \
+ || defined(NPY_CPU_ARMEL) \
+ || defined(NPY_CPU_ARMEL_AARCH32) \
+ || defined(NPY_CPU_ARMEL_AARCH64) \
+ || defined(NPY_CPU_SH_LE) \
+ || defined(NPY_CPU_MIPSEL) \
+ || defined(NPY_CPU_PPC64LE) \
+ || defined(NPY_CPU_ARCEL) \
+ || defined(NPY_CPU_RISCV64)
+ #define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN
+ #elif defined(NPY_CPU_PPC) \
+ || defined(NPY_CPU_SPARC) \
+ || defined(NPY_CPU_S390) \
+ || defined(NPY_CPU_HPPA) \
+ || defined(NPY_CPU_PPC64) \
+ || defined(NPY_CPU_ARMEB) \
+ || defined(NPY_CPU_ARMEB_AARCH32) \
+ || defined(NPY_CPU_ARMEB_AARCH64) \
+ || defined(NPY_CPU_SH_BE) \
+ || defined(NPY_CPU_MIPSEB) \
+ || defined(NPY_CPU_OR1K) \
+ || defined(NPY_CPU_M68K) \
+ || defined(NPY_CPU_ARCEB)
+ #define NPY_BYTE_ORDER NPY_BIG_ENDIAN
+ #else
+ #error Unknown CPU: can not set endianness
+ #endif
+#endif
+
+#endif
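
NPY_BYTE_ORDER turns byte-order handling into a compile-time branch. A minimal sketch, assuming these headers; u32_to_le() is a hypothetical helper that normalizes a host-order value to little-endian storage order:

    #include <numpy/npy_common.h>
    #include <numpy/npy_endian.h>

    /* Convert a host-order 32-bit value to little-endian byte order. */
    static npy_uint32 u32_to_le(npy_uint32 v)
    {
    #if NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN
        return v;                         /* host already matches */
    #else
        return ((v & 0x000000FFU) << 24) |
               ((v & 0x0000FF00U) <<  8) |
               ((v & 0x00FF0000U) >>  8) |
               ((v & 0xFF000000U) >> 24);
    #endif
    }
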
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_interrupt.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_interrupt.h
new file mode 100644
index 0000000..40cb7ac
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_interrupt.h
@@ -0,0 +1,117 @@
+
+/* Signal handling:
+
+This header file defines macros that allow your code to handle
+interrupts received during processing. Interrupts that
+could reasonably be handled:
+
+SIGINT, SIGABRT, SIGALRM, SIGSEGV
+
+****Warning***************
+
+Do not allow code that creates temporary memory or increases reference
+counts of Python objects to be interrupted unless you handle it
+differently.
+
+**************************
+
+The mechanism for handling interrupts is conceptually simple:
+
+ - replace the signal handler with our own home-grown version
+ and store the old one.
+ - run the code to be interrupted -- if an interrupt occurs
+ the handler should basically just cause a return to the
+ calling function to finish up.
+ - restore the old signal handler
+
+Of course, all code that allows interrupts must account for
+returning via the interrupt and handle clean-up correctly. Even
+so, this simple paradigm is complicated by at least three
+factors.
+
+ 1) platform portability (i.e. Microsoft says not to use longjmp
+ to return from signal handling. They have a __try and __except
+ extension to C instead but what about mingw?).
+
+ 2) how to handle threads: apparently whether signals are delivered to
+ every thread of the process or the "invoking" thread is platform
+ dependent. --- we don't handle threads for now.
+
+ 3) do we need to worry about re-entrancy? For now, assume the
+ code will not call back into itself.
+
+Ideas:
+
+ 1) Start by implementing an approach that works on platforms that
+ can use setjmp and longjmp functionality and does nothing
+ on other platforms.
+
+ 2) Ignore threads --- i.e. do not mix interrupt handling and threads
+
+ 3) Add a default signal_handler function to the C-API but have the rest
+ use macros.
+
+
+Simple Interface:
+
+
+In your C-extension: around a block of code you want to be interruptible
+with a SIGINT
+
+NPY_SIGINT_ON
+[code]
+NPY_SIGINT_OFF
+
+In order for this to work correctly, the
+[code] block must not allocate any memory or alter the reference count of any
+Python objects. In other words [code] must be interruptible so that continuation
+after NPY_SIGINT_OFF will only be "missing some computations"
+
+Interrupt handling does not work well with threads.
+
+*/
+
+/* Add signal handling macros
+ Make the global variable and signal handler part of the C-API
+*/
+
+#ifndef NPY_INTERRUPT_H
+#define NPY_INTERRUPT_H
+
+#ifndef NPY_NO_SIGNAL
+
+#include <setjmp.h>
+#include <signal.h>
+
+#ifndef sigsetjmp
+
+#define NPY_SIGSETJMP(arg1, arg2) setjmp(arg1)
+#define NPY_SIGLONGJMP(arg1, arg2) longjmp(arg1, arg2)
+#define NPY_SIGJMP_BUF jmp_buf
+
+#else
+
+#define NPY_SIGSETJMP(arg1, arg2) sigsetjmp(arg1, arg2)
+#define NPY_SIGLONGJMP(arg1, arg2) siglongjmp(arg1, arg2)
+#define NPY_SIGJMP_BUF sigjmp_buf
+
+#endif
+
+# define NPY_SIGINT_ON { \
+ PyOS_sighandler_t _npy_sig_save; \
+ _npy_sig_save = PyOS_setsig(SIGINT, _PyArray_SigintHandler); \
+ if (NPY_SIGSETJMP(*((NPY_SIGJMP_BUF *)_PyArray_GetSigintBuf()), \
+ 1) == 0) { \
+
+# define NPY_SIGINT_OFF } \
+ PyOS_setsig(SIGINT, _npy_sig_save); \
+ }
+
+#else /* NPY_NO_SIGNAL */
+
+#define NPY_SIGINT_ON
+#define NPY_SIGINT_OFF
+
+#endif /* NPY_NO_SIGNAL */
+
+#endif /* NPY_INTERRUPT_H */
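
The "Simple Interface" described in the header comment looks like this in practice. A minimal sketch, assuming a C extension that has already initialized the NumPy C-API (the macros expand to _PyArray_SigintHandler and _PyArray_GetSigintBuf from arrayobject.h); fill_squares() is illustrative, and per the warning above the guarded loop neither allocates memory nor touches reference counts:

    #include <numpy/arrayobject.h>
    #include <numpy/npy_interrupt.h>

    /* Interruptible fill: SIGINT during the loop just leaves the tail
       of out[] uncomputed, as the header comment promises. */
    static void fill_squares(double *out, npy_intp n)
    {
        npy_intp i;

        NPY_SIGINT_ON
        for (i = 0; i < n; i++) {
            out[i] = (double)i * (double)i;
        }
        NPY_SIGINT_OFF
    }
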
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_math.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_math.h
new file mode 100644
index 0000000..582390c
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_math.h
@@ -0,0 +1,551 @@
+#ifndef __NPY_MATH_C99_H_
+#define __NPY_MATH_C99_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <math.h>
+#ifdef __SUNPRO_CC
+#include <sunmath.h>
+#endif
+#ifdef HAVE_NPY_CONFIG_H
+#include <npy_config.h>
+#endif
+#include <numpy/npy_common.h>
+
+/* By adding static inline specifiers to npy_math function definitions when
+ appropriate, the compiler is given the opportunity to optimize */
+#if NPY_INLINE_MATH
+#define NPY_INPLACE NPY_INLINE static
+#else
+#define NPY_INPLACE
+#endif
+
+
+/*
+ * NAN and INFINITY like macros (same behavior as glibc for NAN, same as C99
+ * for INFINITY)
+ *
+ * XXX: I should test whether INFINITY and NAN are available on the platform
+ */
+NPY_INLINE static float __npy_inff(void)
+{
+ const union { npy_uint32 __i; float __f;} __bint = {0x7f800000UL};
+ return __bint.__f;
+}
+
+NPY_INLINE static float __npy_nanf(void)
+{
+ const union { npy_uint32 __i; float __f;} __bint = {0x7fc00000UL};
+ return __bint.__f;
+}
+
+NPY_INLINE static float __npy_pzerof(void)
+{
+ const union { npy_uint32 __i; float __f;} __bint = {0x00000000UL};
+ return __bint.__f;
+}
+
+NPY_INLINE static float __npy_nzerof(void)
+{
+ const union { npy_uint32 __i; float __f;} __bint = {0x80000000UL};
+ return __bint.__f;
+}
+
+#define NPY_INFINITYF __npy_inff()
+#define NPY_NANF __npy_nanf()
+#define NPY_PZEROF __npy_pzerof()
+#define NPY_NZEROF __npy_nzerof()
+
+#define NPY_INFINITY ((npy_double)NPY_INFINITYF)
+#define NPY_NAN ((npy_double)NPY_NANF)
+#define NPY_PZERO ((npy_double)NPY_PZEROF)
+#define NPY_NZERO ((npy_double)NPY_NZEROF)
+
+#define NPY_INFINITYL ((npy_longdouble)NPY_INFINITYF)
+#define NPY_NANL ((npy_longdouble)NPY_NANF)
+#define NPY_PZEROL ((npy_longdouble)NPY_PZEROF)
+#define NPY_NZEROL ((npy_longdouble)NPY_NZEROF)
+
+/*
+ * Useful constants
+ */
+#define NPY_E 2.718281828459045235360287471352662498 /* e */
+#define NPY_LOG2E 1.442695040888963407359924681001892137 /* log_2 e */
+#define NPY_LOG10E 0.434294481903251827651128918916605082 /* log_10 e */
+#define NPY_LOGE2 0.693147180559945309417232121458176568 /* log_e 2 */
+#define NPY_LOGE10 2.302585092994045684017991454684364208 /* log_e 10 */
+#define NPY_PI 3.141592653589793238462643383279502884 /* pi */
+#define NPY_PI_2 1.570796326794896619231321691639751442 /* pi/2 */
+#define NPY_PI_4 0.785398163397448309615660845819875721 /* pi/4 */
+#define NPY_1_PI 0.318309886183790671537767526745028724 /* 1/pi */
+#define NPY_2_PI 0.636619772367581343075535053490057448 /* 2/pi */
+#define NPY_EULER 0.577215664901532860606512090082402431 /* Euler constant */
+#define NPY_SQRT2 1.414213562373095048801688724209698079 /* sqrt(2) */
+#define NPY_SQRT1_2 0.707106781186547524400844362104849039 /* 1/sqrt(2) */
+
+#define NPY_Ef 2.718281828459045235360287471352662498F /* e */
+#define NPY_LOG2Ef 1.442695040888963407359924681001892137F /* log_2 e */
+#define NPY_LOG10Ef 0.434294481903251827651128918916605082F /* log_10 e */
+#define NPY_LOGE2f 0.693147180559945309417232121458176568F /* log_e 2 */
+#define NPY_LOGE10f 2.302585092994045684017991454684364208F /* log_e 10 */
+#define NPY_PIf 3.141592653589793238462643383279502884F /* pi */
+#define NPY_PI_2f 1.570796326794896619231321691639751442F /* pi/2 */
+#define NPY_PI_4f 0.785398163397448309615660845819875721F /* pi/4 */
+#define NPY_1_PIf 0.318309886183790671537767526745028724F /* 1/pi */
+#define NPY_2_PIf 0.636619772367581343075535053490057448F /* 2/pi */
+#define NPY_EULERf 0.577215664901532860606512090082402431F /* Euler constant */
+#define NPY_SQRT2f 1.414213562373095048801688724209698079F /* sqrt(2) */
+#define NPY_SQRT1_2f 0.707106781186547524400844362104849039F /* 1/sqrt(2) */
+
+#define NPY_El 2.718281828459045235360287471352662498L /* e */
+#define NPY_LOG2El 1.442695040888963407359924681001892137L /* log_2 e */
+#define NPY_LOG10El 0.434294481903251827651128918916605082L /* log_10 e */
+#define NPY_LOGE2l 0.693147180559945309417232121458176568L /* log_e 2 */
+#define NPY_LOGE10l 2.302585092994045684017991454684364208L /* log_e 10 */
+#define NPY_PIl 3.141592653589793238462643383279502884L /* pi */
+#define NPY_PI_2l 1.570796326794896619231321691639751442L /* pi/2 */
+#define NPY_PI_4l 0.785398163397448309615660845819875721L /* pi/4 */
+#define NPY_1_PIl 0.318309886183790671537767526745028724L /* 1/pi */
+#define NPY_2_PIl 0.636619772367581343075535053490057448L /* 2/pi */
+#define NPY_EULERl 0.577215664901532860606512090082402431L /* Euler constant */
+#define NPY_SQRT2l 1.414213562373095048801688724209698079L /* sqrt(2) */
+#define NPY_SQRT1_2l 0.707106781186547524400844362104849039L /* 1/sqrt(2) */
+
+/*
+ * C99 double math funcs
+ */
+NPY_INPLACE double npy_sin(double x);
+NPY_INPLACE double npy_cos(double x);
+NPY_INPLACE double npy_tan(double x);
+NPY_INPLACE double npy_sinh(double x);
+NPY_INPLACE double npy_cosh(double x);
+NPY_INPLACE double npy_tanh(double x);
+
+NPY_INPLACE double npy_asin(double x);
+NPY_INPLACE double npy_acos(double x);
+NPY_INPLACE double npy_atan(double x);
+
+NPY_INPLACE double npy_log(double x);
+NPY_INPLACE double npy_log10(double x);
+NPY_INPLACE double npy_exp(double x);
+NPY_INPLACE double npy_sqrt(double x);
+NPY_INPLACE double npy_cbrt(double x);
+
+NPY_INPLACE double npy_fabs(double x);
+NPY_INPLACE double npy_ceil(double x);
+NPY_INPLACE double npy_fmod(double x, double y);
+NPY_INPLACE double npy_floor(double x);
+
+NPY_INPLACE double npy_expm1(double x);
+NPY_INPLACE double npy_log1p(double x);
+NPY_INPLACE double npy_hypot(double x, double y);
+NPY_INPLACE double npy_acosh(double x);
+NPY_INPLACE double npy_asinh(double xx);
+NPY_INPLACE double npy_atanh(double x);
+NPY_INPLACE double npy_rint(double x);
+NPY_INPLACE double npy_trunc(double x);
+NPY_INPLACE double npy_exp2(double x);
+NPY_INPLACE double npy_log2(double x);
+
+NPY_INPLACE double npy_atan2(double x, double y);
+NPY_INPLACE double npy_pow(double x, double y);
+NPY_INPLACE double npy_modf(double x, double* y);
+NPY_INPLACE double npy_frexp(double x, int* y);
+NPY_INPLACE double npy_ldexp(double n, int y);
+
+NPY_INPLACE double npy_copysign(double x, double y);
+double npy_nextafter(double x, double y);
+double npy_spacing(double x);
+
+/*
+ * IEEE 754 FPU handling. These are guaranteed to be macros
+ */
+
+/* use builtins to avoid function calls in tight loops
+ * only available if npy_config.h is available (= numpy's own build) */
+#if HAVE___BUILTIN_ISNAN
+ #define npy_isnan(x) __builtin_isnan(x)
+#else
+ #ifndef NPY_HAVE_DECL_ISNAN
+ #define npy_isnan(x) ((x) != (x))
+ #else
+ #if defined(_MSC_VER) && (_MSC_VER < 1900)
+ #define npy_isnan(x) _isnan((x))
+ #else
+ #define npy_isnan(x) isnan(x)
+ #endif
+ #endif
+#endif
+
+
+/* only available if npy_config.h is available (= numpy's own build) */
+#if HAVE___BUILTIN_ISFINITE
+ #define npy_isfinite(x) __builtin_isfinite(x)
+#else
+ #ifndef NPY_HAVE_DECL_ISFINITE
+ #ifdef _MSC_VER
+ #define npy_isfinite(x) _finite((x))
+ #else
+ #define npy_isfinite(x) !npy_isnan((x) + (-x))
+ #endif
+ #else
+ #define npy_isfinite(x) isfinite((x))
+ #endif
+#endif
+
+/* only available if npy_config.h is available (= numpy's own build) */
+#if HAVE___BUILTIN_ISINF
+ #define npy_isinf(x) __builtin_isinf(x)
+#else
+ #ifndef NPY_HAVE_DECL_ISINF
+ #define npy_isinf(x) (!npy_isfinite(x) && !npy_isnan(x))
+ #else
+ #if defined(_MSC_VER) && (_MSC_VER < 1900)
+ #define npy_isinf(x) (!_finite((x)) && !_isnan((x)))
+ #else
+ #define npy_isinf(x) isinf((x))
+ #endif
+ #endif
+#endif
+
+#ifndef NPY_HAVE_DECL_SIGNBIT
+ int _npy_signbit_f(float x);
+ int _npy_signbit_d(double x);
+ int _npy_signbit_ld(long double x);
+ #define npy_signbit(x) \
+ (sizeof (x) == sizeof (long double) ? _npy_signbit_ld (x) \
+ : sizeof (x) == sizeof (double) ? _npy_signbit_d (x) \
+ : _npy_signbit_f (x))
+#else
+ #define npy_signbit(x) signbit((x))
+#endif
+
+/*
+ * float C99 math functions
+ */
+NPY_INPLACE float npy_sinf(float x);
+NPY_INPLACE float npy_cosf(float x);
+NPY_INPLACE float npy_tanf(float x);
+NPY_INPLACE float npy_sinhf(float x);
+NPY_INPLACE float npy_coshf(float x);
+NPY_INPLACE float npy_tanhf(float x);
+NPY_INPLACE float npy_fabsf(float x);
+NPY_INPLACE float npy_floorf(float x);
+NPY_INPLACE float npy_ceilf(float x);
+NPY_INPLACE float npy_rintf(float x);
+NPY_INPLACE float npy_truncf(float x);
+NPY_INPLACE float npy_sqrtf(float x);
+NPY_INPLACE float npy_cbrtf(float x);
+NPY_INPLACE float npy_log10f(float x);
+NPY_INPLACE float npy_logf(float x);
+NPY_INPLACE float npy_expf(float x);
+NPY_INPLACE float npy_expm1f(float x);
+NPY_INPLACE float npy_asinf(float x);
+NPY_INPLACE float npy_acosf(float x);
+NPY_INPLACE float npy_atanf(float x);
+NPY_INPLACE float npy_asinhf(float x);
+NPY_INPLACE float npy_acoshf(float x);
+NPY_INPLACE float npy_atanhf(float x);
+NPY_INPLACE float npy_log1pf(float x);
+NPY_INPLACE float npy_exp2f(float x);
+NPY_INPLACE float npy_log2f(float x);
+
+NPY_INPLACE float npy_atan2f(float x, float y);
+NPY_INPLACE float npy_hypotf(float x, float y);
+NPY_INPLACE float npy_powf(float x, float y);
+NPY_INPLACE float npy_fmodf(float x, float y);
+
+NPY_INPLACE float npy_modff(float x, float* y);
+NPY_INPLACE float npy_frexpf(float x, int* y);
+NPY_INPLACE float npy_ldexpf(float x, int y);
+
+NPY_INPLACE float npy_copysignf(float x, float y);
+float npy_nextafterf(float x, float y);
+float npy_spacingf(float x);
+
+/*
+ * long double C99 math functions
+ */
+NPY_INPLACE npy_longdouble npy_sinl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_cosl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_tanl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_sinhl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_coshl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_tanhl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_fabsl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_floorl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_ceill(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_rintl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_truncl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_sqrtl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_cbrtl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_log10l(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_logl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_expl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_expm1l(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_asinl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_acosl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_atanl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_asinhl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_acoshl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_atanhl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_log1pl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_exp2l(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_log2l(npy_longdouble x);
+
+NPY_INPLACE npy_longdouble npy_atan2l(npy_longdouble x, npy_longdouble y);
+NPY_INPLACE npy_longdouble npy_hypotl(npy_longdouble x, npy_longdouble y);
+NPY_INPLACE npy_longdouble npy_powl(npy_longdouble x, npy_longdouble y);
+NPY_INPLACE npy_longdouble npy_fmodl(npy_longdouble x, npy_longdouble y);
+
+NPY_INPLACE npy_longdouble npy_modfl(npy_longdouble x, npy_longdouble* y);
+NPY_INPLACE npy_longdouble npy_frexpl(npy_longdouble x, int* y);
+NPY_INPLACE npy_longdouble npy_ldexpl(npy_longdouble x, int y);
+
+NPY_INPLACE npy_longdouble npy_copysignl(npy_longdouble x, npy_longdouble y);
+npy_longdouble npy_nextafterl(npy_longdouble x, npy_longdouble y);
+npy_longdouble npy_spacingl(npy_longdouble x);
+
+/*
+ * Non standard functions
+ */
+NPY_INPLACE double npy_deg2rad(double x);
+NPY_INPLACE double npy_rad2deg(double x);
+NPY_INPLACE double npy_logaddexp(double x, double y);
+NPY_INPLACE double npy_logaddexp2(double x, double y);
+NPY_INPLACE double npy_divmod(double x, double y, double *modulus);
+NPY_INPLACE double npy_heaviside(double x, double h0);
+
+NPY_INPLACE float npy_deg2radf(float x);
+NPY_INPLACE float npy_rad2degf(float x);
+NPY_INPLACE float npy_logaddexpf(float x, float y);
+NPY_INPLACE float npy_logaddexp2f(float x, float y);
+NPY_INPLACE float npy_divmodf(float x, float y, float *modulus);
+NPY_INPLACE float npy_heavisidef(float x, float h0);
+
+NPY_INPLACE npy_longdouble npy_deg2radl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_rad2degl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_logaddexpl(npy_longdouble x, npy_longdouble y);
+NPY_INPLACE npy_longdouble npy_logaddexp2l(npy_longdouble x, npy_longdouble y);
+NPY_INPLACE npy_longdouble npy_divmodl(npy_longdouble x, npy_longdouble y,
+ npy_longdouble *modulus);
+NPY_INPLACE npy_longdouble npy_heavisidel(npy_longdouble x, npy_longdouble h0);
+
+#define npy_degrees npy_rad2deg
+#define npy_degreesf npy_rad2degf
+#define npy_degreesl npy_rad2degl
+
+#define npy_radians npy_deg2rad
+#define npy_radiansf npy_deg2radf
+#define npy_radiansl npy_deg2radl
+
+/*
+ * Complex declarations
+ */
+
+/*
+ * C99 specifies that complex numbers have the same representation as
+ * an array of two elements, where the first element is the real part
+ * and the second element is the imaginary part.
+ */
+#define __NPY_CPACK_IMP(x, y, type, ctype) \
+ union { \
+ ctype z; \
+ type a[2]; \
+ } z1; \
+ \
+ z1.a[0] = (x); \
+ z1.a[1] = (y); \
+ \
+ return z1.z;
+
+static NPY_INLINE npy_cdouble npy_cpack(double x, double y)
+{
+ __NPY_CPACK_IMP(x, y, double, npy_cdouble);
+}
+
+static NPY_INLINE npy_cfloat npy_cpackf(float x, float y)
+{
+ __NPY_CPACK_IMP(x, y, float, npy_cfloat);
+}
+
+static NPY_INLINE npy_clongdouble npy_cpackl(npy_longdouble x, npy_longdouble y)
+{
+ __NPY_CPACK_IMP(x, y, npy_longdouble, npy_clongdouble);
+}
+#undef __NPY_CPACK_IMP
+
+/*
+ * Same remark as above, but in the other direction: extract first/second
+ * member of complex number, assuming a C99-compatible representation
+ *
+ * These are defined as static inline, so a reasonable compiler would
+ * most likely compile this to one or two instructions (on CISC at least)
+ */
+#define __NPY_CEXTRACT_IMP(z, index, type, ctype) \
+ union { \
+ ctype z; \
+ type a[2]; \
+ } __z_repr; \
+ __z_repr.z = z; \
+ \
+ return __z_repr.a[index];
+
+static NPY_INLINE double npy_creal(npy_cdouble z)
+{
+ __NPY_CEXTRACT_IMP(z, 0, double, npy_cdouble);
+}
+
+static NPY_INLINE double npy_cimag(npy_cdouble z)
+{
+ __NPY_CEXTRACT_IMP(z, 1, double, npy_cdouble);
+}
+
+static NPY_INLINE float npy_crealf(npy_cfloat z)
+{
+ __NPY_CEXTRACT_IMP(z, 0, float, npy_cfloat);
+}
+
+static NPY_INLINE float npy_cimagf(npy_cfloat z)
+{
+ __NPY_CEXTRACT_IMP(z, 1, float, npy_cfloat);
+}
+
+static NPY_INLINE npy_longdouble npy_creall(npy_clongdouble z)
+{
+ __NPY_CEXTRACT_IMP(z, 0, npy_longdouble, npy_clongdouble);
+}
+
+static NPY_INLINE npy_longdouble npy_cimagl(npy_clongdouble z)
+{
+ __NPY_CEXTRACT_IMP(z, 1, npy_longdouble, npy_clongdouble);
+}
+#undef __NPY_CEXTRACT_IMP
+
+/*
+ * Double precision complex functions
+ */
+double npy_cabs(npy_cdouble z);
+double npy_carg(npy_cdouble z);
+
+npy_cdouble npy_cexp(npy_cdouble z);
+npy_cdouble npy_clog(npy_cdouble z);
+npy_cdouble npy_cpow(npy_cdouble x, npy_cdouble y);
+
+npy_cdouble npy_csqrt(npy_cdouble z);
+
+npy_cdouble npy_ccos(npy_cdouble z);
+npy_cdouble npy_csin(npy_cdouble z);
+npy_cdouble npy_ctan(npy_cdouble z);
+
+npy_cdouble npy_ccosh(npy_cdouble z);
+npy_cdouble npy_csinh(npy_cdouble z);
+npy_cdouble npy_ctanh(npy_cdouble z);
+
+npy_cdouble npy_cacos(npy_cdouble z);
+npy_cdouble npy_casin(npy_cdouble z);
+npy_cdouble npy_catan(npy_cdouble z);
+
+npy_cdouble npy_cacosh(npy_cdouble z);
+npy_cdouble npy_casinh(npy_cdouble z);
+npy_cdouble npy_catanh(npy_cdouble z);
+
+/*
+ * Single precision complex functions
+ */
+float npy_cabsf(npy_cfloat z);
+float npy_cargf(npy_cfloat z);
+
+npy_cfloat npy_cexpf(npy_cfloat z);
+npy_cfloat npy_clogf(npy_cfloat z);
+npy_cfloat npy_cpowf(npy_cfloat x, npy_cfloat y);
+
+npy_cfloat npy_csqrtf(npy_cfloat z);
+
+npy_cfloat npy_ccosf(npy_cfloat z);
+npy_cfloat npy_csinf(npy_cfloat z);
+npy_cfloat npy_ctanf(npy_cfloat z);
+
+npy_cfloat npy_ccoshf(npy_cfloat z);
+npy_cfloat npy_csinhf(npy_cfloat z);
+npy_cfloat npy_ctanhf(npy_cfloat z);
+
+npy_cfloat npy_cacosf(npy_cfloat z);
+npy_cfloat npy_casinf(npy_cfloat z);
+npy_cfloat npy_catanf(npy_cfloat z);
+
+npy_cfloat npy_cacoshf(npy_cfloat z);
+npy_cfloat npy_casinhf(npy_cfloat z);
+npy_cfloat npy_catanhf(npy_cfloat z);
+
+
+/*
+ * Extended precision complex functions
+ */
+npy_longdouble npy_cabsl(npy_clongdouble z);
+npy_longdouble npy_cargl(npy_clongdouble z);
+
+npy_clongdouble npy_cexpl(npy_clongdouble z);
+npy_clongdouble npy_clogl(npy_clongdouble z);
+npy_clongdouble npy_cpowl(npy_clongdouble x, npy_clongdouble y);
+
+npy_clongdouble npy_csqrtl(npy_clongdouble z);
+
+npy_clongdouble npy_ccosl(npy_clongdouble z);
+npy_clongdouble npy_csinl(npy_clongdouble z);
+npy_clongdouble npy_ctanl(npy_clongdouble z);
+
+npy_clongdouble npy_ccoshl(npy_clongdouble z);
+npy_clongdouble npy_csinhl(npy_clongdouble z);
+npy_clongdouble npy_ctanhl(npy_clongdouble z);
+
+npy_clongdouble npy_cacosl(npy_clongdouble z);
+npy_clongdouble npy_casinl(npy_clongdouble z);
+npy_clongdouble npy_catanl(npy_clongdouble z);
+
+npy_clongdouble npy_cacoshl(npy_clongdouble z);
+npy_clongdouble npy_casinhl(npy_clongdouble z);
+npy_clongdouble npy_catanhl(npy_clongdouble z);
+
+
+/*
+ * Functions that set the floating point error
+ * status word.
+ */
+
+/*
+ * platform-dependent code translates floating point
+ * status to an integer sum of these values
+ */
+#define NPY_FPE_DIVIDEBYZERO 1
+#define NPY_FPE_OVERFLOW 2
+#define NPY_FPE_UNDERFLOW 4
+#define NPY_FPE_INVALID 8
+
+int npy_clear_floatstatus_barrier(char*);
+int npy_get_floatstatus_barrier(char*);
+/*
+ * use caution with these - clang and gcc8.1 are known to reorder calls
+ * to this form of the function which can defeat the check. The _barrier
+ * form of the call is preferable, where the argument is
+ * (char*)&local_variable
+ */
+int npy_clear_floatstatus(void);
+int npy_get_floatstatus(void);
+
+void npy_set_floatstatus_divbyzero(void);
+void npy_set_floatstatus_overflow(void);
+void npy_set_floatstatus_underflow(void);
+void npy_set_floatstatus_invalid(void);
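+
+/*
+ * Editor's sketch of the typical check-and-clear pattern, using the
+ * _barrier variants so the compiler cannot move the status reads past
+ * the computation:
+ *
+ *     char barrier;
+ *     npy_clear_floatstatus_barrier(&barrier);
+ *     x = y * y;   (a computation that may overflow)
+ *     if (npy_get_floatstatus_barrier(&barrier) & NPY_FPE_OVERFLOW) {
+ *         (handle the overflow)
+ *     }
+ */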
+
+#ifdef __cplusplus
+}
+#endif
+
+#if NPY_INLINE_MATH
+#include "npy_math_internal.h"
+#endif
+
+#endif
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_no_deprecated_api.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_no_deprecated_api.h
new file mode 100644
index 0000000..6183dc2
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_no_deprecated_api.h
@@ -0,0 +1,19 @@
+/*
+ * This include file is provided for inclusion in Cython *.pyd files where
+ * one would like to define the NPY_NO_DEPRECATED_API macro. It can be
+ * included by
+ *
+ * cdef extern from "npy_no_deprecated_api.h": pass
+ *
+ */
+#ifndef NPY_NO_DEPRECATED_API
+
+/* put this check here since there may be multiple includes in C extensions. */
+#if defined(NDARRAYTYPES_H) || defined(_NPY_DEPRECATED_API_H) || \
+ defined(OLD_DEFINES_H)
+#error "npy_no_deprecated_api.h" must be first among numpy includes.
+#else
+#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+#endif
+
+#endif
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_os.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_os.h
new file mode 100644
index 0000000..9228c39
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/npy_os.h
@@ -0,0 +1,30 @@
+#ifndef _NPY_OS_H_
+#define _NPY_OS_H_
+
+#if defined(linux) || defined(__linux) || defined(__linux__)
+ #define NPY_OS_LINUX
+#elif defined(__FreeBSD__) || defined(__NetBSD__) || \
+ defined(__OpenBSD__) || defined(__DragonFly__)
+ #define NPY_OS_BSD
+ #ifdef __FreeBSD__
+ #define NPY_OS_FREEBSD
+ #elif defined(__NetBSD__)
+ #define NPY_OS_NETBSD
+ #elif defined(__OpenBSD__)
+ #define NPY_OS_OPENBSD
+ #elif defined(__DragonFly__)
+ #define NPY_OS_DRAGONFLY
+ #endif
+#elif defined(sun) || defined(__sun)
+ #define NPY_OS_SOLARIS
+#elif defined(__CYGWIN__)
+ #define NPY_OS_CYGWIN
+#elif defined(_WIN32) || defined(__WIN32__) || defined(WIN32)
+ #define NPY_OS_WIN32
+#elif defined(__APPLE__)
+ #define NPY_OS_DARWIN
+#else
+ #define NPY_OS_UNKNOWN
+#endif
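+
+/*
+ * Editor's note: these macros are intended for compile-time branching,
+ * e.g.
+ *
+ *     #ifdef NPY_OS_LINUX
+ *     (Linux-only code)
+ *     #endif
+ */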
+
+#endif
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/numpyconfig.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/numpyconfig.h
new file mode 100644
index 0000000..ab198f3
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/numpyconfig.h
@@ -0,0 +1,41 @@
+#ifndef _NPY_NUMPYCONFIG_H_
+#define _NPY_NUMPYCONFIG_H_
+
+#include "_numpyconfig.h"
+
+/*
+ * On Mac OS X, because there is only one configuration stage for all the archs
+ * in universal builds, any macro which depends on the arch needs to be
+ * hardcoded
+ */
+#ifdef __APPLE__
+ #undef NPY_SIZEOF_LONG
+ #undef NPY_SIZEOF_PY_INTPTR_T
+
+ #ifdef __LP64__
+ #define NPY_SIZEOF_LONG 8
+ #define NPY_SIZEOF_PY_INTPTR_T 8
+ #else
+ #define NPY_SIZEOF_LONG 4
+ #define NPY_SIZEOF_PY_INTPTR_T 4
+ #endif
+#endif
+
+/**
+ * To help with the NPY_NO_DEPRECATED_API macro, we include API version
+ * numbers for specific versions of NumPy. To exclude all API that was
+ * deprecated as of 1.7, add the following before #including any NumPy
+ * headers:
+ * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
+ */
+#define NPY_1_7_API_VERSION 0x00000007
+#define NPY_1_8_API_VERSION 0x00000008
+#define NPY_1_9_API_VERSION 0x00000008
+#define NPY_1_10_API_VERSION 0x00000008
+#define NPY_1_11_API_VERSION 0x00000008
+#define NPY_1_12_API_VERSION 0x00000008
+#define NPY_1_13_API_VERSION 0x00000008
+#define NPY_1_14_API_VERSION 0x00000008
+#define NPY_1_15_API_VERSION 0x00000008
+
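+/*
+ * Editor's sketch of the include ordering this implies:
+ *
+ *     #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
+ *     #include <numpy/arrayobject.h>
+ */
+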
+#endif
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/old_defines.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/old_defines.h
new file mode 100644
index 0000000..abf8159
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/old_defines.h
@@ -0,0 +1,187 @@
+/* This header is deprecated as of NumPy 1.7 */
+#ifndef OLD_DEFINES_H
+#define OLD_DEFINES_H
+
+#if defined(NPY_NO_DEPRECATED_API) && NPY_NO_DEPRECATED_API >= NPY_1_7_API_VERSION
+#error The header "old_defines.h" is deprecated as of NumPy 1.7.
+#endif
+
+#define NDARRAY_VERSION NPY_VERSION
+
+#define PyArray_MIN_BUFSIZE NPY_MIN_BUFSIZE
+#define PyArray_MAX_BUFSIZE NPY_MAX_BUFSIZE
+#define PyArray_BUFSIZE NPY_BUFSIZE
+
+#define PyArray_PRIORITY NPY_PRIORITY
+#define PyArray_SUBTYPE_PRIORITY NPY_PRIORITY
+#define PyArray_NUM_FLOATTYPE NPY_NUM_FLOATTYPE
+
+#define NPY_MAX PyArray_MAX
+#define NPY_MIN PyArray_MIN
+
+#define PyArray_TYPES NPY_TYPES
+#define PyArray_BOOL NPY_BOOL
+#define PyArray_BYTE NPY_BYTE
+#define PyArray_UBYTE NPY_UBYTE
+#define PyArray_SHORT NPY_SHORT
+#define PyArray_USHORT NPY_USHORT
+#define PyArray_INT NPY_INT
+#define PyArray_UINT NPY_UINT
+#define PyArray_LONG NPY_LONG
+#define PyArray_ULONG NPY_ULONG
+#define PyArray_LONGLONG NPY_LONGLONG
+#define PyArray_ULONGLONG NPY_ULONGLONG
+#define PyArray_HALF NPY_HALF
+#define PyArray_FLOAT NPY_FLOAT
+#define PyArray_DOUBLE NPY_DOUBLE
+#define PyArray_LONGDOUBLE NPY_LONGDOUBLE
+#define PyArray_CFLOAT NPY_CFLOAT
+#define PyArray_CDOUBLE NPY_CDOUBLE
+#define PyArray_CLONGDOUBLE NPY_CLONGDOUBLE
+#define PyArray_OBJECT NPY_OBJECT
+#define PyArray_STRING NPY_STRING
+#define PyArray_UNICODE NPY_UNICODE
+#define PyArray_VOID NPY_VOID
+#define PyArray_DATETIME NPY_DATETIME
+#define PyArray_TIMEDELTA NPY_TIMEDELTA
+#define PyArray_NTYPES NPY_NTYPES
+#define PyArray_NOTYPE NPY_NOTYPE
+#define PyArray_CHAR NPY_CHAR
+#define PyArray_USERDEF NPY_USERDEF
+#define PyArray_NUMUSERTYPES NPY_NUMUSERTYPES
+
+#define PyArray_INTP NPY_INTP
+#define PyArray_UINTP NPY_UINTP
+
+#define PyArray_INT8 NPY_INT8
+#define PyArray_UINT8 NPY_UINT8
+#define PyArray_INT16 NPY_INT16
+#define PyArray_UINT16 NPY_UINT16
+#define PyArray_INT32 NPY_INT32
+#define PyArray_UINT32 NPY_UINT32
+
+#ifdef NPY_INT64
+#define PyArray_INT64 NPY_INT64
+#define PyArray_UINT64 NPY_UINT64
+#endif
+
+#ifdef NPY_INT128
+#define PyArray_INT128 NPY_INT128
+#define PyArray_UINT128 NPY_UINT128
+#endif
+
+#ifdef NPY_FLOAT16
+#define PyArray_FLOAT16 NPY_FLOAT16
+#define PyArray_COMPLEX32 NPY_COMPLEX32
+#endif
+
+#ifdef NPY_FLOAT80
+#define PyArray_FLOAT80 NPY_FLOAT80
+#define PyArray_COMPLEX160 NPY_COMPLEX160
+#endif
+
+#ifdef NPY_FLOAT96
+#define PyArray_FLOAT96 NPY_FLOAT96
+#define PyArray_COMPLEX192 NPY_COMPLEX192
+#endif
+
+#ifdef NPY_FLOAT128
+#define PyArray_FLOAT128 NPY_FLOAT128
+#define PyArray_COMPLEX256 NPY_COMPLEX256
+#endif
+
+#define PyArray_FLOAT32 NPY_FLOAT32
+#define PyArray_COMPLEX64 NPY_COMPLEX64
+#define PyArray_FLOAT64 NPY_FLOAT64
+#define PyArray_COMPLEX128 NPY_COMPLEX128
+
+
+#define PyArray_TYPECHAR NPY_TYPECHAR
+#define PyArray_BOOLLTR NPY_BOOLLTR
+#define PyArray_BYTELTR NPY_BYTELTR
+#define PyArray_UBYTELTR NPY_UBYTELTR
+#define PyArray_SHORTLTR NPY_SHORTLTR
+#define PyArray_USHORTLTR NPY_USHORTLTR
+#define PyArray_INTLTR NPY_INTLTR
+#define PyArray_UINTLTR NPY_UINTLTR
+#define PyArray_LONGLTR NPY_LONGLTR
+#define PyArray_ULONGLTR NPY_ULONGLTR
+#define PyArray_LONGLONGLTR NPY_LONGLONGLTR
+#define PyArray_ULONGLONGLTR NPY_ULONGLONGLTR
+#define PyArray_HALFLTR NPY_HALFLTR
+#define PyArray_FLOATLTR NPY_FLOATLTR
+#define PyArray_DOUBLELTR NPY_DOUBLELTR
+#define PyArray_LONGDOUBLELTR NPY_LONGDOUBLELTR
+#define PyArray_CFLOATLTR NPY_CFLOATLTR
+#define PyArray_CDOUBLELTR NPY_CDOUBLELTR
+#define PyArray_CLONGDOUBLELTR NPY_CLONGDOUBLELTR
+#define PyArray_OBJECTLTR NPY_OBJECTLTR
+#define PyArray_STRINGLTR NPY_STRINGLTR
+#define PyArray_STRINGLTR2 NPY_STRINGLTR2
+#define PyArray_UNICODELTR NPY_UNICODELTR
+#define PyArray_VOIDLTR NPY_VOIDLTR
+#define PyArray_DATETIMELTR NPY_DATETIMELTR
+#define PyArray_TIMEDELTALTR NPY_TIMEDELTALTR
+#define PyArray_CHARLTR NPY_CHARLTR
+#define PyArray_INTPLTR NPY_INTPLTR
+#define PyArray_UINTPLTR NPY_UINTPLTR
+#define PyArray_GENBOOLLTR NPY_GENBOOLLTR
+#define PyArray_SIGNEDLTR NPY_SIGNEDLTR
+#define PyArray_UNSIGNEDLTR NPY_UNSIGNEDLTR
+#define PyArray_FLOATINGLTR NPY_FLOATINGLTR
+#define PyArray_COMPLEXLTR NPY_COMPLEXLTR
+
+#define PyArray_QUICKSORT NPY_QUICKSORT
+#define PyArray_HEAPSORT NPY_HEAPSORT
+#define PyArray_MERGESORT NPY_MERGESORT
+#define PyArray_SORTKIND NPY_SORTKIND
+#define PyArray_NSORTS NPY_NSORTS
+
+#define PyArray_NOSCALAR NPY_NOSCALAR
+#define PyArray_BOOL_SCALAR NPY_BOOL_SCALAR
+#define PyArray_INTPOS_SCALAR NPY_INTPOS_SCALAR
+#define PyArray_INTNEG_SCALAR NPY_INTNEG_SCALAR
+#define PyArray_FLOAT_SCALAR NPY_FLOAT_SCALAR
+#define PyArray_COMPLEX_SCALAR NPY_COMPLEX_SCALAR
+#define PyArray_OBJECT_SCALAR NPY_OBJECT_SCALAR
+#define PyArray_SCALARKIND NPY_SCALARKIND
+#define PyArray_NSCALARKINDS NPY_NSCALARKINDS
+
+#define PyArray_ANYORDER NPY_ANYORDER
+#define PyArray_CORDER NPY_CORDER
+#define PyArray_FORTRANORDER NPY_FORTRANORDER
+#define PyArray_ORDER NPY_ORDER
+
+#define PyDescr_ISBOOL PyDataType_ISBOOL
+#define PyDescr_ISUNSIGNED PyDataType_ISUNSIGNED
+#define PyDescr_ISSIGNED PyDataType_ISSIGNED
+#define PyDescr_ISINTEGER PyDataType_ISINTEGER
+#define PyDescr_ISFLOAT PyDataType_ISFLOAT
+#define PyDescr_ISNUMBER PyDataType_ISNUMBER
+#define PyDescr_ISSTRING PyDataType_ISSTRING
+#define PyDescr_ISCOMPLEX PyDataType_ISCOMPLEX
+#define PyDescr_ISPYTHON PyDataType_ISPYTHON
+#define PyDescr_ISFLEXIBLE PyDataType_ISFLEXIBLE
+#define PyDescr_ISUSERDEF PyDataType_ISUSERDEF
+#define PyDescr_ISEXTENDED PyDataType_ISEXTENDED
+#define PyDescr_ISOBJECT PyDataType_ISOBJECT
+#define PyDescr_HASFIELDS PyDataType_HASFIELDS
+
+#define PyArray_LITTLE NPY_LITTLE
+#define PyArray_BIG NPY_BIG
+#define PyArray_NATIVE NPY_NATIVE
+#define PyArray_SWAP NPY_SWAP
+#define PyArray_IGNORE NPY_IGNORE
+
+#define PyArray_NATBYTE NPY_NATBYTE
+#define PyArray_OPPBYTE NPY_OPPBYTE
+
+#define PyArray_MAX_ELSIZE NPY_MAX_ELSIZE
+
+#define PyArray_USE_PYMEM NPY_USE_PYMEM
+
+#define PyArray_RemoveLargest PyArray_RemoveSmallest
+
+#define PyArray_UCS4 npy_ucs4
+
+#endif
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/oldnumeric.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/oldnumeric.h
new file mode 100644
index 0000000..38530fa
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/oldnumeric.h
@@ -0,0 +1,25 @@
+#include "arrayobject.h"
+
+#ifndef PYPY_VERSION
+#ifndef REFCOUNT
+# define REFCOUNT NPY_REFCOUNT
+# define MAX_ELSIZE 16
+#endif
+#endif
+
+#define PyArray_UNSIGNED_TYPES
+#define PyArray_SBYTE NPY_BYTE
+#define PyArray_CopyArray PyArray_CopyInto
+#define _PyArray_multiply_list PyArray_MultiplyIntList
+#define PyArray_ISSPACESAVER(m) NPY_FALSE
+#define PyScalarArray_Check PyArray_CheckScalar
+
+#define CONTIGUOUS NPY_CONTIGUOUS
+#define OWN_DIMENSIONS 0
+#define OWN_STRIDES 0
+#define OWN_DATA NPY_OWNDATA
+#define SAVESPACE 0
+#define SAVESPACEBIT 0
+
+#undef import_array
+#define import_array() { if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); } }
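+
+/* Editor's note: import_array() must run once in the extension module's
+ * init function before any other array API call is made. */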
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/ufunc_api.txt b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/ufunc_api.txt
new file mode 100644
index 0000000..883fb77
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/ufunc_api.txt
@@ -0,0 +1,337 @@
+
+=================
+NumPy Ufunc C-API
+=================
+::
+
+ PyObject *
+ PyUFunc_FromFuncAndData(PyUFuncGenericFunction *func, void
+ **data, char *types, int ntypes, int nin, int
+ nout, int identity, const char *name, const
+ char *doc, int unused)
+
+
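+A minimal registration sketch (editor's illustration; ``double_square``
+and the surrounding table names are hypothetical, following the standard
+extension-module pattern)::
+
+    static void double_square(char **args, npy_intp *dimensions,
+                              npy_intp *steps, void *data)
+    {
+        npy_intp i, n = dimensions[0];
+        char *in = args[0], *out = args[1];
+        for (i = 0; i < n; i++) {
+            double v = *(double *)in;
+            *(double *)out = v * v;
+            in += steps[0];
+            out += steps[1];
+        }
+    }
+
+    static PyUFuncGenericFunction funcs[1] = {&double_square};
+    static char types[2] = {NPY_DOUBLE, NPY_DOUBLE};
+    static void *data[1] = {NULL};
+
+    /* in the module init function: */
+    PyObject *square = PyUFunc_FromFuncAndData(
+        funcs, data, types, 1, 1, 1, PyUFunc_None,
+        "square", "elementwise x*x", 0);
+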
+::
+
+ int
+ PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc, int
+ usertype, PyUFuncGenericFunction
+ function, int *arg_types, void *data)
+
+
+::
+
+ int
+ PyUFunc_GenericFunction(PyUFuncObject *ufunc, PyObject *args, PyObject
+ *kwds, PyArrayObject **op)
+
+
+This generic function is called with the ufunc object, the arguments to it,
+and an array of (pointers to) PyArrayObjects which are NULL.
+
+'op' is an array of at least NPY_MAXARGS PyArrayObject *.
+
+::
+
+ void
+ PyUFunc_f_f_As_d_d(char **args, npy_intp *dimensions, npy_intp
+ *steps, void *func)
+
+
+::
+
+ void
+ PyUFunc_d_d(char **args, npy_intp *dimensions, npy_intp *steps, void
+ *func)
+
+
+::
+
+ void
+ PyUFunc_f_f(char **args, npy_intp *dimensions, npy_intp *steps, void
+ *func)
+
+
+::
+
+ void
+ PyUFunc_g_g(char **args, npy_intp *dimensions, npy_intp *steps, void
+ *func)
+
+
+::
+
+ void
+ PyUFunc_F_F_As_D_D(char **args, npy_intp *dimensions, npy_intp
+ *steps, void *func)
+
+
+::
+
+ void
+ PyUFunc_F_F(char **args, npy_intp *dimensions, npy_intp *steps, void
+ *func)
+
+
+::
+
+ void
+ PyUFunc_D_D(char **args, npy_intp *dimensions, npy_intp *steps, void
+ *func)
+
+
+::
+
+ void
+ PyUFunc_G_G(char **args, npy_intp *dimensions, npy_intp *steps, void
+ *func)
+
+
+::
+
+ void
+ PyUFunc_O_O(char **args, npy_intp *dimensions, npy_intp *steps, void
+ *func)
+
+
+::
+
+ void
+ PyUFunc_ff_f_As_dd_d(char **args, npy_intp *dimensions, npy_intp
+ *steps, void *func)
+
+
+::
+
+ void
+ PyUFunc_ff_f(char **args, npy_intp *dimensions, npy_intp *steps, void
+ *func)
+
+
+::
+
+ void
+ PyUFunc_dd_d(char **args, npy_intp *dimensions, npy_intp *steps, void
+ *func)
+
+
+::
+
+ void
+ PyUFunc_gg_g(char **args, npy_intp *dimensions, npy_intp *steps, void
+ *func)
+
+
+::
+
+ void
+ PyUFunc_FF_F_As_DD_D(char **args, npy_intp *dimensions, npy_intp
+ *steps, void *func)
+
+
+::
+
+ void
+ PyUFunc_DD_D(char **args, npy_intp *dimensions, npy_intp *steps, void
+ *func)
+
+
+::
+
+ void
+ PyUFunc_FF_F(char **args, npy_intp *dimensions, npy_intp *steps, void
+ *func)
+
+
+::
+
+ void
+ PyUFunc_GG_G(char **args, npy_intp *dimensions, npy_intp *steps, void
+ *func)
+
+
+::
+
+ void
+ PyUFunc_OO_O(char **args, npy_intp *dimensions, npy_intp *steps, void
+ *func)
+
+
+::
+
+ void
+ PyUFunc_O_O_method(char **args, npy_intp *dimensions, npy_intp
+ *steps, void *func)
+
+
+::
+
+ void
+ PyUFunc_OO_O_method(char **args, npy_intp *dimensions, npy_intp
+ *steps, void *func)
+
+
+::
+
+ void
+ PyUFunc_On_Om(char **args, npy_intp *dimensions, npy_intp *steps, void
+ *func)
+
+
+::
+
+ int
+ PyUFunc_GetPyValues(char *name, int *bufsize, int *errmask, PyObject
+ **errobj)
+
+
+On return, if errobj is populated with a non-NULL value, the caller
+owns a new reference to errobj.
+
+::
+
+ int
+ PyUFunc_checkfperr(int errmask, PyObject *errobj, int *first)
+
+
+::
+
+ void
+ PyUFunc_clearfperr()
+
+
+::
+
+ int
+ PyUFunc_getfperr(void)
+
+
+::
+
+ int
+ PyUFunc_handlefperr(int errmask, PyObject *errobj, int retstatus, int
+ *first)
+
+
+::
+
+ int
+ PyUFunc_ReplaceLoopBySignature(PyUFuncObject
+ *func, PyUFuncGenericFunction
+ newfunc, int
+ *signature, PyUFuncGenericFunction
+ *oldfunc)
+
+
+::
+
+ PyObject *
+ PyUFunc_FromFuncAndDataAndSignature(PyUFuncGenericFunction *func, void
+ **data, char *types, int
+ ntypes, int nin, int nout, int
+ identity, const char *name, const
+ char *doc, int unused, const char
+ *signature)
+
+
+::
+
+ int
+ PyUFunc_SetUsesArraysAsData(void **data, size_t i)
+
+
+::
+
+ void
+ PyUFunc_e_e(char **args, npy_intp *dimensions, npy_intp *steps, void
+ *func)
+
+
+::
+
+ void
+ PyUFunc_e_e_As_f_f(char **args, npy_intp *dimensions, npy_intp
+ *steps, void *func)
+
+
+::
+
+ void
+ PyUFunc_e_e_As_d_d(char **args, npy_intp *dimensions, npy_intp
+ *steps, void *func)
+
+
+::
+
+ void
+ PyUFunc_ee_e(char **args, npy_intp *dimensions, npy_intp *steps, void
+ *func)
+
+
+::
+
+ void
+ PyUFunc_ee_e_As_ff_f(char **args, npy_intp *dimensions, npy_intp
+ *steps, void *func)
+
+
+::
+
+ void
+ PyUFunc_ee_e_As_dd_d(char **args, npy_intp *dimensions, npy_intp
+ *steps, void *func)
+
+
+::
+
+ int
+ PyUFunc_DefaultTypeResolver(PyUFuncObject *ufunc, NPY_CASTING
+ casting, PyArrayObject
+ **operands, PyObject
+ *type_tup, PyArray_Descr **out_dtypes)
+
+
+This function applies the default type resolution rules
+for the provided ufunc.
+
+Returns 0 on success, -1 on error.
+
+::
+
+ int
+ PyUFunc_ValidateCasting(PyUFuncObject *ufunc, NPY_CASTING
+ casting, PyArrayObject
+ **operands, PyArray_Descr **dtypes)
+
+
+Validates that the input operands can be cast to
+the input types, and the output types can be cast to
+the output operands where provided.
+
+Returns 0 on success, -1 (with exception raised) on validation failure.
+
+::
+
+ int
+ PyUFunc_RegisterLoopForDescr(PyUFuncObject *ufunc, PyArray_Descr
+ *user_dtype, PyUFuncGenericFunction
+ function, PyArray_Descr
+ **arg_dtypes, void *data)
+
+
+::
+
+ PyObject *
+ PyUFunc_FromFuncAndDataAndSignatureAndIdentity(PyUFuncGenericFunction
+ *func, void
+ **data, char
+ *types, int ntypes, int
+ nin, int nout, int
+ identity, const char
+ *name, const char
+ *doc, int unused, const
+ char
+ *signature, PyObject
+ *identity_value)
+
+
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/ufuncobject.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/ufuncobject.h
new file mode 100644
index 0000000..90d837a
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/ufuncobject.h
@@ -0,0 +1,377 @@
+#ifndef Py_UFUNCOBJECT_H
+#define Py_UFUNCOBJECT_H
+
+#include <numpy/npy_math.h>
+#include <numpy/npy_common.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * The legacy generic inner loop for a standard element-wise or
+ * generalized ufunc.
+ */
+typedef void (*PyUFuncGenericFunction)
+ (char **args,
+ npy_intp *dimensions,
+ npy_intp *strides,
+ void *innerloopdata);
+
+/*
+ * The most generic one-dimensional inner loop for
+ * a masked standard element-wise ufunc. "Masked" here means that it skips
+ * doing calculations on any items for which the maskptr array has a true
+ * value.
+ */
+typedef void (PyUFunc_MaskedStridedInnerLoopFunc)(
+ char **dataptrs, npy_intp *strides,
+ char *maskptr, npy_intp mask_stride,
+ npy_intp count,
+ NpyAuxData *innerloopdata);
+
+/* Forward declaration for the type resolver and loop selector typedefs */
+struct _tagPyUFuncObject;
+
+/*
+ * Given the operands for calling a ufunc, should determine the
+ * calculation input and output data types and return an inner loop function.
+ * This function should validate that the casting rule is being followed,
+ * and fail if it is not.
+ *
+ * For backwards compatibility, the regular type resolution function does not
+ * support auxiliary data with object semantics. The type resolution call
+ * which returns a masked generic function returns a standard NpyAuxData
+ * object, for which the NPY_AUXDATA_FREE and NPY_AUXDATA_CLONE macros
+ * work.
+ *
+ * ufunc: The ufunc object.
+ * casting: The 'casting' parameter provided to the ufunc.
+ * operands: An array of length (ufunc->nin + ufunc->nout),
+ * with the output parameters possibly NULL.
+ * type_tup: Either NULL, or the type_tup passed to the ufunc.
+ * out_dtypes: An array which should be populated with new
+ * references to (ufunc->nin + ufunc->nout) new
+ * dtypes, one for each input and output. These
+ * dtypes should all be in native-endian format.
+ *
+ * Should return 0 on success, -1 on failure (with exception set),
+ * or -2 if Py_NotImplemented should be returned.
+ */
+typedef int (PyUFunc_TypeResolutionFunc)(
+ struct _tagPyUFuncObject *ufunc,
+ NPY_CASTING casting,
+ PyArrayObject **operands,
+ PyObject *type_tup,
+ PyArray_Descr **out_dtypes);
+
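+/*
+ * Editor's sketch: a resolver that simply defers to the default rules;
+ * the name my_type_resolver is hypothetical. Shown as a comment because
+ * PyUFunc_DefaultTypeResolver only becomes visible once __ufunc_api.h
+ * is included further below.
+ *
+ *     static int
+ *     my_type_resolver(PyUFuncObject *ufunc, NPY_CASTING casting,
+ *                      PyArrayObject **operands, PyObject *type_tup,
+ *                      PyArray_Descr **out_dtypes)
+ *     {
+ *         return PyUFunc_DefaultTypeResolver(ufunc, casting, operands,
+ *                                            type_tup, out_dtypes);
+ *     }
+ */
+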
+/*
+ * Given an array of DTypes as returned by the PyUFunc_TypeResolutionFunc,
+ * and an array of fixed strides (the array will contain NPY_MAX_INTP for
+ * strides which are not necessarily fixed), returns an inner loop
+ * with associated auxiliary data.
+ *
+ * For backwards compatibility, there is a variant of the inner loop
+ * selection which returns an inner loop irrespective of the strides,
+ * and with a void* static auxiliary data instead of an NpyAuxData *
+ * dynamically allocatable auxiliary data.
+ *
+ * ufunc: The ufunc object.
+ * dtypes: An array which has been populated with dtypes,
+ * in most cases by the type resolution function
+ * for the same ufunc.
+ * fixed_strides: For each input/output, either the stride that
+ * will be used every time the function is called
+ * or NPY_MAX_INTP if the stride might change or
+ * is not known ahead of time. The loop selection
+ * function may use this stride to pick inner loops
+ * which are optimized for contiguous or 0-stride
+ * cases.
+ * out_innerloop: Should be populated with the correct ufunc inner
+ * loop for the given type.
+ * out_innerloopdata: Should be populated with the void* data to
+ * be passed into the out_innerloop function.
+ * out_needs_api: If the inner loop needs to use the Python API,
+ *                it should set this to 1; otherwise it should
+ *                leave this untouched.
+ */
+typedef int (PyUFunc_LegacyInnerLoopSelectionFunc)(
+ struct _tagPyUFuncObject *ufunc,
+ PyArray_Descr **dtypes,
+ PyUFuncGenericFunction *out_innerloop,
+ void **out_innerloopdata,
+ int *out_needs_api);
+typedef int (PyUFunc_MaskedInnerLoopSelectionFunc)(
+ struct _tagPyUFuncObject *ufunc,
+ PyArray_Descr **dtypes,
+ PyArray_Descr *mask_dtype,
+ npy_intp *fixed_strides,
+ npy_intp fixed_mask_stride,
+ PyUFunc_MaskedStridedInnerLoopFunc **out_innerloop,
+ NpyAuxData **out_innerloopdata,
+ int *out_needs_api);
+
+typedef struct _tagPyUFuncObject {
+ PyObject_HEAD
+ /*
+ * nin: Number of inputs
+ * nout: Number of outputs
+ * nargs: Always nin + nout (Why is it stored?)
+ */
+ int nin, nout, nargs;
+
+ /* Identity for reduction, either PyUFunc_One or PyUFunc_Zero */
+ int identity;
+
+ /* Array of one-dimensional core loops */
+ PyUFuncGenericFunction *functions;
+ /* Array of funcdata that gets passed into the functions */
+ void **data;
+ /* The number of elements in 'functions' and 'data' */
+ int ntypes;
+
+ /* Used to be unused field 'check_return' */
+ int reserved1;
+
+ /* The name of the ufunc */
+ const char *name;
+
+ /* Array of type numbers, of size ('nargs' * 'ntypes') */
+ char *types;
+
+ /* Documentation string */
+ const char *doc;
+
+ void *ptr;
+ PyObject *obj;
+ PyObject *userloops;
+
+ /* generalized ufunc parameters */
+
+ /* 0 for scalar ufunc; 1 for generalized ufunc */
+ int core_enabled;
+ /* number of distinct dimension names in signature */
+ int core_num_dim_ix;
+
+ /*
+ * dimension indices of input/output argument k are stored in
+ * core_dim_ixs[core_offsets[k]..core_offsets[k]+core_num_dims[k]-1]
+ */
+
+ /* numbers of core dimensions of each argument */
+ int *core_num_dims;
+ /*
+ * dimension indices in a flattened form; indices
+ * are in the range of [0,core_num_dim_ix)
+ */
+ int *core_dim_ixs;
+ /*
+ * positions of 1st core dimensions of each
+ * argument in core_dim_ixs, equivalent to cumsum(core_num_dims)
+ */
+ int *core_offsets;
+ /* signature string for printing purpose */
+ char *core_signature;
+
+ /*
+ * A function which resolves the types and fills an array
+ * with the dtypes for the inputs and outputs.
+ */
+ PyUFunc_TypeResolutionFunc *type_resolver;
+ /*
+ * A function which returns an inner loop written for
+ * NumPy 1.6 and earlier ufuncs. This is for backwards
+ * compatibility, and may be NULL if inner_loop_selector
+ * is specified.
+ */
+ PyUFunc_LegacyInnerLoopSelectionFunc *legacy_inner_loop_selector;
+ /*
+ * This was blocked off to be the "new" inner loop selector in 1.7,
+ * but this was never implemented. (This is also why the above
+ * selector is called the "legacy" selector.)
+ */
+ void *reserved2;
+ /*
+ * A function which returns a masked inner loop for the ufunc.
+ */
+ PyUFunc_MaskedInnerLoopSelectionFunc *masked_inner_loop_selector;
+
+ /*
+ * List of flags for each operand when ufunc is called by nditer object.
+ * These flags will be used in addition to the default flags for each
+ * operand set by nditer object.
+ */
+ npy_uint32 *op_flags;
+
+ /*
+ * List of global flags used when ufunc is called by nditer object.
+ * These flags will be used in addition to the default global flags
+ * set by nditer object.
+ */
+ npy_uint32 iter_flags;
+
+ /* New in NPY_API_VERSION 0x0000000D and above */
+
+ /*
+ * for each core_num_dim_ix distinct dimension names,
+ * the possible "frozen" size (-1 if not frozen).
+ */
+ npy_intp *core_dim_sizes;
+
+ /*
+ * for each distinct core dimension, a set of UFUNC_CORE_DIM* flags
+ */
+ npy_uint32 *core_dim_flags;
+
+ /* Identity for reduction, when identity == PyUFunc_IdentityValue */
+ PyObject *identity_value;
+
+} PyUFuncObject;
+
+#include "arrayobject.h"
+/* Generalized ufunc; 0x0001 reserved for possible use as CORE_ENABLED */
+/* the core dimension's size will be determined by the operands. */
+#define UFUNC_CORE_DIM_SIZE_INFERRED 0x0002
+/* the core dimension may be absent */
+#define UFUNC_CORE_DIM_CAN_IGNORE 0x0004
+/* flags inferred during execution */
+#define UFUNC_CORE_DIM_MISSING 0x00040000
+
+#define UFUNC_ERR_IGNORE 0
+#define UFUNC_ERR_WARN 1
+#define UFUNC_ERR_RAISE 2
+#define UFUNC_ERR_CALL 3
+#define UFUNC_ERR_PRINT 4
+#define UFUNC_ERR_LOG 5
+
+ /* Python side integer mask */
+
+#define UFUNC_MASK_DIVIDEBYZERO 0x07
+#define UFUNC_MASK_OVERFLOW 0x3f
+#define UFUNC_MASK_UNDERFLOW 0x1ff
+#define UFUNC_MASK_INVALID 0xfff
+
+#define UFUNC_SHIFT_DIVIDEBYZERO 0
+#define UFUNC_SHIFT_OVERFLOW 3
+#define UFUNC_SHIFT_UNDERFLOW 6
+#define UFUNC_SHIFT_INVALID 9
+
+
+#define UFUNC_OBJ_ISOBJECT 1
+#define UFUNC_OBJ_NEEDS_API 2
+
+ /* Default user error mode */
+#define UFUNC_ERR_DEFAULT \
+ (UFUNC_ERR_WARN << UFUNC_SHIFT_DIVIDEBYZERO) + \
+ (UFUNC_ERR_WARN << UFUNC_SHIFT_OVERFLOW) + \
+ (UFUNC_ERR_WARN << UFUNC_SHIFT_INVALID)
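+
+/*
+ * Editor's sketch: a per-category mode is recovered from the packed
+ * errmask by masking and shifting, e.g. for divide-by-zero:
+ *
+ *     int mode = (errmask & UFUNC_MASK_DIVIDEBYZERO)
+ *                    >> UFUNC_SHIFT_DIVIDEBYZERO;   (one of UFUNC_ERR_*)
+ */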
+
+#if NPY_ALLOW_THREADS
+#define NPY_LOOP_BEGIN_THREADS do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) _save = PyEval_SaveThread();} while (0);
+#define NPY_LOOP_END_THREADS do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) PyEval_RestoreThread(_save);} while (0);
+#else
+#define NPY_LOOP_BEGIN_THREADS
+#define NPY_LOOP_END_THREADS
+#endif
+
+/*
+ * UFunc has unit of 0, and the order of operations can be reordered
+ * This case allows reduction with multiple axes at once.
+ */
+#define PyUFunc_Zero 0
+/*
+ * UFunc has unit of 1, and the order of operations can be reordered
+ * This case allows reduction with multiple axes at once.
+ */
+#define PyUFunc_One 1
+/*
+ * UFunc has unit of -1, and the order of operations can be reordered
+ * This case allows reduction with multiple axes at once. Intended for
+ * bitwise_and reduction.
+ */
+#define PyUFunc_MinusOne 2
+/*
+ * UFunc has no unit, and the order of operations cannot be reordered.
+ * This case does not allow reduction with multiple axes at once.
+ */
+#define PyUFunc_None -1
+/*
+ * UFunc has no unit, and the order of operations can be reordered
+ * This case allows reduction with multiple axes at once.
+ */
+#define PyUFunc_ReorderableNone -2
+/*
+ * UFunc unit is in identity_value, and the order of operations can be reordered
+ * This case allows reduction with multiple axes at once.
+ */
+#define PyUFunc_IdentityValue -3
+
+
+#define UFUNC_REDUCE 0
+#define UFUNC_ACCUMULATE 1
+#define UFUNC_REDUCEAT 2
+#define UFUNC_OUTER 3
+
+
+typedef struct {
+ int nin;
+ int nout;
+ PyObject *callable;
+} PyUFunc_PyFuncData;
+
+/* A linked-list of function information for
+ user-defined 1-d loops.
+ */
+typedef struct _loop1d_info {
+ PyUFuncGenericFunction func;
+ void *data;
+ int *arg_types;
+ struct _loop1d_info *next;
+ int nargs;
+ PyArray_Descr **arg_dtypes;
+} PyUFunc_Loop1d;
+
+
+#include "__ufunc_api.h"
+
+#define UFUNC_PYVALS_NAME "UFUNC_PYVALS"
+
+#define UFUNC_CHECK_ERROR(arg) \
+ do {if ((((arg)->obj & UFUNC_OBJ_NEEDS_API) && PyErr_Occurred()) || \
+ ((arg)->errormask && \
+ PyUFunc_checkfperr((arg)->errormask, \
+ (arg)->errobj, \
+ &(arg)->first))) \
+ goto fail;} while (0)
+
+/*
+ * THESE MACROS ARE DEPRECATED.
+ * Use npy_set_floatstatus_* in the npymath library.
+ */
+#define UFUNC_FPE_DIVIDEBYZERO NPY_FPE_DIVIDEBYZERO
+#define UFUNC_FPE_OVERFLOW NPY_FPE_OVERFLOW
+#define UFUNC_FPE_UNDERFLOW NPY_FPE_UNDERFLOW
+#define UFUNC_FPE_INVALID NPY_FPE_INVALID
+
+#define UFUNC_CHECK_STATUS(ret) \
+ { \
+ ret = npy_clear_floatstatus(); \
+ }
+#define generate_divbyzero_error() npy_set_floatstatus_divbyzero()
+#define generate_overflow_error() npy_set_floatstatus_overflow()
+
+ /* Make sure it gets defined if it isn't already */
+#ifndef UFUNC_NOFPE
+/* Clear the floating point exception default of Borland C++ */
+#if defined(__BORLANDC__)
+#define UFUNC_NOFPE _control87(MCW_EM, MCW_EM);
+#else
+#define UFUNC_NOFPE
+#endif
+#endif
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_UFUNCOBJECT_H */
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/utils.h b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/utils.h
new file mode 100644
index 0000000..32218b8
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/include/numpy/utils.h
@@ -0,0 +1,21 @@
+#ifndef __NUMPY_UTILS_HEADER__
+#define __NUMPY_UTILS_HEADER__
+
+#ifndef __COMP_NPY_UNUSED
+ #if defined(__GNUC__)
+ #define __COMP_NPY_UNUSED __attribute__ ((__unused__))
+ #elif defined(__ICC)
+ #define __COMP_NPY_UNUSED __attribute__ ((__unused__))
+ #elif defined(__clang__)
+ #define __COMP_NPY_UNUSED __attribute__ ((unused))
+ #else
+ #define __COMP_NPY_UNUSED
+ #endif
+#endif
+
+/* Use this to tag a variable as not used. It will remove the unused-variable
+ * warning on supported platforms (see __COMP_NPY_UNUSED) and mangle the
+ * variable to avoid accidental use. */
+#define NPY_UNUSED(x) (__NPY_UNUSED_TAGGED ## x) __COMP_NPY_UNUSED
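+
+/*
+ * Editor's sketch: a callback that deliberately ignores its second
+ * argument (the name "handler" is hypothetical):
+ *
+ *     static int handler(int value, void *NPY_UNUSED(ctx))
+ *     {
+ *         return value;
+ *     }
+ */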
+
+#endif
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/info.py b/project/venv/lib/python2.7/site-packages/numpy/core/info.py
new file mode 100644
index 0000000..c6f7bbc
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/info.py
@@ -0,0 +1,87 @@
+"""Defines a multi-dimensional array and useful procedures for Numerical computation.
+
+Functions
+
+- array - NumPy Array construction
+- zeros - Return an array of all zeros
+- empty - Return an uninitialized array
+- shape - Return shape of sequence or array
+- rank - Return number of dimensions
+- size - Return number of elements in entire array or a
+ certain dimension
+- fromstring - Construct array from (byte) string
+- take - Select sub-arrays using sequence of indices
+- put - Set sub-arrays using sequence of 1-D indices
+- putmask - Set portion of arrays using a mask
+- reshape - Return array with new shape
+- repeat - Repeat elements of array
+- choose - Construct new array from indexed array tuple
+- correlate - Correlate two 1-d arrays
+- searchsorted - Search for element in 1-d array
+- sum - Total sum over a specified dimension
+- average - Average, possibly weighted, over axis or array.
+- cumsum - Cumulative sum over a specified dimension
+- product - Total product over a specified dimension
+- cumproduct - Cumulative product over a specified dimension
+- alltrue - Logical and over an entire axis
+- sometrue - Logical or over an entire axis
+- allclose - Tests if sequences are essentially equal
+
+More Functions:
+
+- arange - Return regularly spaced array
+- asarray - Guarantee NumPy array
+- convolve - Convolve two 1-d arrays
+- swapaxes - Exchange axes
+- concatenate - Join arrays together
+- transpose - Permute axes
+- sort - Sort elements of array
+- argsort - Indices of sorted array
+- argmax - Index of largest value
+- argmin - Index of smallest value
+- inner - Inner product of two arrays
+- dot - Dot product (matrix multiplication)
+- outer - Outer product of two arrays
+- resize - Return array with arbitrary new shape
+- indices - Tuple of indices
+- fromfunction - Construct array from universal function
+- diagonal - Return diagonal array
+- trace - Trace of array
+- dump - Dump array to file object (pickle)
+- dumps - Return pickled string representing data
+- load - Return array stored in file object
+- loads - Return array from pickled string
+- ravel - Return array as 1-D
+- nonzero - Indices of nonzero elements for 1-D array
+- shape - Shape of array
+- where - Construct array from binary result
+- compress - Elements of array where condition is true
+- clip - Clip array between two values
+- ones - Array of all ones
+- identity - 2-D identity array (matrix)
+
+(Universal) Math Functions
+
+ add logical_or exp
+ subtract logical_xor log
+ multiply logical_not log10
+ divide maximum sin
+ divide_safe minimum sinh
+ conjugate bitwise_and sqrt
+ power bitwise_or tan
+ absolute bitwise_xor tanh
+ negative invert ceil
+ greater left_shift fabs
+ greater_equal right_shift floor
+ less arccos arctan2
+ less_equal arcsin fmod
+ equal arctan hypot
+ not_equal cos around
+ logical_and cosh sign
+ arccosh arcsinh arctanh
+
+"""
+from __future__ import division, absolute_import, print_function
+
+depends = ['testing']
+global_symbols = ['*']
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/info.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/info.pyc
new file mode 100644
index 0000000..32adae4
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/info.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/lib/libnpymath.a b/project/venv/lib/python2.7/site-packages/numpy/core/lib/libnpymath.a
new file mode 100644
index 0000000..bbbf777
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/lib/libnpymath.a differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/lib/npy-pkg-config/mlib.ini b/project/venv/lib/python2.7/site-packages/numpy/core/lib/npy-pkg-config/mlib.ini
new file mode 100644
index 0000000..5840f5e
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/lib/npy-pkg-config/mlib.ini
@@ -0,0 +1,12 @@
+[meta]
+Name = mlib
+Description = Math library used with this version of numpy
+Version = 1.0
+
+[default]
+Libs=-lm
+Cflags=
+
+[msvc]
+Libs=m.lib
+Cflags=
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/lib/npy-pkg-config/npymath.ini b/project/venv/lib/python2.7/site-packages/numpy/core/lib/npy-pkg-config/npymath.ini
new file mode 100644
index 0000000..3e465ad
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/lib/npy-pkg-config/npymath.ini
@@ -0,0 +1,20 @@
+[meta]
+Name=npymath
+Description=Portable, core math library implementing C99 standard
+Version=0.1
+
+[variables]
+pkgname=numpy.core
+prefix=${pkgdir}
+libdir=${prefix}/lib
+includedir=${prefix}/include
+
+[default]
+Libs=-L${libdir} -lnpymath
+Cflags=-I${includedir}
+Requires=mlib
+
+[msvc]
+Libs=/LIBPATH:${libdir} npymath.lib
+Cflags=/INCLUDE:${includedir}
+Requires=mlib
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/machar.py b/project/venv/lib/python2.7/site-packages/numpy/core/machar.py
new file mode 100644
index 0000000..91fb4ed
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/machar.py
@@ -0,0 +1,344 @@
+"""
+Machine arithmetics - determine the parameters of the
+floating-point arithmetic system
+
+Author: Pearu Peterson, September 2003
+
+"""
+from __future__ import division, absolute_import, print_function
+
+__all__ = ['MachAr']
+
+from numpy.core.fromnumeric import any
+from numpy.core.numeric import errstate
+from numpy.core.overrides import set_module
+
+# Need to speed this up...especially for longfloat
+
+@set_module('numpy')
+class MachAr(object):
+ """
+ Diagnosing machine parameters.
+
+ Attributes
+ ----------
+ ibeta : int
+ Radix in which numbers are represented.
+ it : int
+ Number of base-`ibeta` digits in the floating point mantissa M.
+ machep : int
+ Exponent of the smallest (most negative) power of `ibeta` that,
+ added to 1.0, gives something different from 1.0
+ eps : float
+ Floating-point number ``beta**machep`` (floating point precision)
+ negep : int
+ Exponent of the smallest power of `ibeta` that, subtracted
+ from 1.0, gives something different from 1.0.
+ epsneg : float
+ Floating-point number ``beta**negep``.
+ iexp : int
+ Number of bits in the exponent (including its sign and bias).
+ minexp : int
+ Smallest (most negative) power of `ibeta` consistent with there
+ being no leading zeros in the mantissa.
+ xmin : float
+ Floating point number ``beta**minexp`` (the smallest [in
+ magnitude] usable floating value).
+ maxexp : int
+ Smallest (positive) power of `ibeta` that causes overflow.
+ xmax : float
+ ``(1-epsneg) * beta**maxexp`` (the largest [in magnitude]
+ usable floating value).
+ irnd : int
+ In ``range(6)``, information on what kind of rounding is done
+ during addition, and on how underflow is handled.
+ ngrd : int
+ Number of 'guard digits' used when truncating the product
+ of two mantissas to fit the representation.
+ epsilon : float
+ Same as `eps`.
+ tiny : float
+ Same as `xmin`.
+ huge : float
+ Same as `xmax`.
+ precision : float
+ ``int(-log10(eps))``
+ resolution : float
+ ``10**(-precision)``
+
+ Parameters
+ ----------
+ float_conv : function, optional
+ Function that converts an integer or integer array to a float
+ or float array. Default is `float`.
+ int_conv : function, optional
+ Function that converts a float or float array to an integer or
+ integer array. Default is `int`.
+ float_to_float : function, optional
+ Function that converts a float array to float. Default is `float`.
+ Note that this does not seem to do anything useful in the current
+ implementation.
+ float_to_str : function, optional
+ Function that converts a single float to a string. Default is
+ ``lambda v:'%24.16e' %v``.
+ title : str, optional
+ Title that is printed in the string representation of `MachAr`.
+
+ See Also
+ --------
+ finfo : Machine limits for floating point types.
+ iinfo : Machine limits for integer types.
+
+ References
+ ----------
+ .. [1] Press, Teukolsky, Vetterling and Flannery,
+ "Numerical Recipes in C++," 2nd ed,
+ Cambridge University Press, 2002, p. 31.
+
+ """
+
+ def __init__(self, float_conv=float,int_conv=int,
+ float_to_float=float,
+ float_to_str=lambda v:'%24.16e' % v,
+ title='Python floating point number'):
+ """
+
+ float_conv - convert integer to float (array)
+ int_conv - convert float (array) to integer
+ float_to_float - convert float array to float
+ float_to_str - convert array float to str
+ title - description of used floating point numbers
+
+ """
+ # We ignore all errors here because we are purposely triggering
+ # underflow to detect the properties of the running arch.
+ with errstate(under='ignore'):
+ self._do_init(float_conv, int_conv, float_to_float, float_to_str, title)
+
+ def _do_init(self, float_conv, int_conv, float_to_float, float_to_str, title):
+ max_iterN = 10000
+ msg = "Did not converge after %d tries with %s"
+ one = float_conv(1)
+ two = one + one
+ zero = one - one
+
+ # Do we really need to do this? Aren't they 2 and 2.0?
+ # Determine ibeta and beta
+ a = one
+ for _ in range(max_iterN):
+ a = a + a
+ temp = a + one
+ temp1 = temp - a
+ if any(temp1 - one != zero):
+ break
+ else:
+ raise RuntimeError(msg % (_, one.dtype))
+ b = one
+ for _ in range(max_iterN):
+ b = b + b
+ temp = a + b
+ itemp = int_conv(temp-a)
+ if any(itemp != 0):
+ break
+ else:
+ raise RuntimeError(msg % (_, one.dtype))
+ ibeta = itemp
+ beta = float_conv(ibeta)
+
+ # Determine it and irnd
+ it = -1
+ b = one
+ for _ in range(max_iterN):
+ it = it + 1
+ b = b * beta
+ temp = b + one
+ temp1 = temp - b
+ if any(temp1 - one != zero):
+ break
+ else:
+ raise RuntimeError(msg % (_, one.dtype))
+
+ betah = beta / two
+ a = one
+ for _ in range(max_iterN):
+ a = a + a
+ temp = a + one
+ temp1 = temp - a
+ if any(temp1 - one != zero):
+ break
+ else:
+ raise RuntimeError(msg % (_, one.dtype))
+ temp = a + betah
+ irnd = 0
+ if any(temp-a != zero):
+ irnd = 1
+ tempa = a + beta
+ temp = tempa + betah
+ if irnd == 0 and any(temp-tempa != zero):
+ irnd = 2
+
+ # Determine negep and epsneg
+ negep = it + 3
+ betain = one / beta
+ a = one
+ for i in range(negep):
+ a = a * betain
+ b = a
+ for _ in range(max_iterN):
+ temp = one - a
+ if any(temp-one != zero):
+ break
+ a = a * beta
+ negep = negep - 1
+ # Prevent infinite loop on PPC with gcc 4.0:
+ if negep < 0:
+ raise RuntimeError("could not determine machine tolerance "
+ "for 'negep', locals() -> %s" % (locals()))
+ else:
+ raise RuntimeError(msg % (_, one.dtype))
+ negep = -negep
+ epsneg = a
+
+ # Determine machep and eps
+ machep = - it - 3
+ a = b
+
+ for _ in range(max_iterN):
+ temp = one + a
+ if any(temp-one != zero):
+ break
+ a = a * beta
+ machep = machep + 1
+ else:
+ raise RuntimeError(msg % (_, one.dtype))
+ eps = a
+
+ # Determine ngrd
+ ngrd = 0
+ temp = one + eps
+ if irnd == 0 and any(temp*one - one != zero):
+ ngrd = 1
+
+ # Determine iexp
+ i = 0
+ k = 1
+ z = betain
+ t = one + eps
+ nxres = 0
+ for _ in range(max_iterN):
+ y = z
+ z = y*y
+ a = z*one # Check here for underflow
+ temp = z*t
+ if any(a+a == zero) or any(abs(z) >= y):
+ break
+ temp1 = temp * betain
+ if any(temp1*beta == z):
+ break
+ i = i + 1
+ k = k + k
+ else:
+ raise RuntimeError(msg % (_, one.dtype))
+ if ibeta != 10:
+ iexp = i + 1
+ mx = k + k
+ else:
+ iexp = 2
+ iz = ibeta
+ while k >= iz:
+ iz = iz * ibeta
+ iexp = iexp + 1
+ mx = iz + iz - 1
+
+ # Determine minexp and xmin
+ for _ in range(max_iterN):
+ xmin = y
+ y = y * betain
+ a = y * one
+ temp = y * t
+ if any((a + a) != zero) and any(abs(y) < xmin):
+ k = k + 1
+ temp1 = temp * betain
+ if any(temp1*beta == y) and any(temp != y):
+ nxres = 3
+ xmin = y
+ break
+ else:
+ break
+ else:
+ raise RuntimeError(msg % (_, one.dtype))
+ minexp = -k
+
+ # Determine maxexp, xmax
+ if mx <= k + k - 3 and ibeta != 10:
+ mx = mx + mx
+ iexp = iexp + 1
+ maxexp = mx + minexp
+ irnd = irnd + nxres
+ if irnd >= 2:
+ maxexp = maxexp - 2
+ i = maxexp + minexp
+ if ibeta == 2 and not i:
+ maxexp = maxexp - 1
+ if i > 20:
+ maxexp = maxexp - 1
+ if any(a != y):
+ maxexp = maxexp - 2
+ xmax = one - epsneg
+ if any(xmax*one != xmax):
+ xmax = one - beta*epsneg
+ xmax = xmax / (xmin*beta*beta*beta)
+ i = maxexp + minexp + 3
+ for j in range(i):
+ if ibeta == 2:
+ xmax = xmax + xmax
+ else:
+ xmax = xmax * beta
+
+ self.ibeta = ibeta
+ self.it = it
+ self.negep = negep
+ self.epsneg = float_to_float(epsneg)
+ self._str_epsneg = float_to_str(epsneg)
+ self.machep = machep
+ self.eps = float_to_float(eps)
+ self._str_eps = float_to_str(eps)
+ self.ngrd = ngrd
+ self.iexp = iexp
+ self.minexp = minexp
+ self.xmin = float_to_float(xmin)
+ self._str_xmin = float_to_str(xmin)
+ self.maxexp = maxexp
+ self.xmax = float_to_float(xmax)
+ self._str_xmax = float_to_str(xmax)
+ self.irnd = irnd
+
+ self.title = title
+ # Commonly used parameters
+ self.epsilon = self.eps
+ self.tiny = self.xmin
+ self.huge = self.xmax
+
+ import math
+ self.precision = int(-math.log10(float_to_float(self.eps)))
+ ten = two + two + two + two + two
+ resolution = ten ** (-self.precision)
+ self.resolution = float_to_float(resolution)
+ self._str_resolution = float_to_str(resolution)
+
+ def __str__(self):
+ fmt = (
+ 'Machine parameters for %(title)s\n'
+ '---------------------------------------------------------------------\n'
+ 'ibeta=%(ibeta)s it=%(it)s iexp=%(iexp)s ngrd=%(ngrd)s irnd=%(irnd)s\n'
+ 'machep=%(machep)s eps=%(_str_eps)s (beta**machep == epsilon)\n'
+ 'negep =%(negep)s epsneg=%(_str_epsneg)s (beta**epsneg)\n'
+ 'minexp=%(minexp)s xmin=%(_str_xmin)s (beta**minexp == tiny)\n'
+ 'maxexp=%(maxexp)s xmax=%(_str_xmax)s ((1-epsneg)*beta**maxexp == huge)\n'
+ '---------------------------------------------------------------------\n'
+ )
+ return fmt % self.__dict__
+
+
+if __name__ == '__main__':
+ print(MachAr())
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/machar.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/machar.pyc
new file mode 100644
index 0000000..3aaaa4b
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/machar.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/memmap.py b/project/venv/lib/python2.7/site-packages/numpy/core/memmap.py
new file mode 100644
index 0000000..82bc470
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/memmap.py
@@ -0,0 +1,334 @@
+from __future__ import division, absolute_import, print_function
+
+import numpy as np
+from .numeric import uint8, ndarray, dtype
+from numpy.compat import (
+ long, basestring, os_fspath, contextlib_nullcontext, is_pathlib_path
+)
+from numpy.core.overrides import set_module
+
+__all__ = ['memmap']
+
+dtypedescr = dtype
+valid_filemodes = ["r", "c", "r+", "w+"]
+writeable_filemodes = ["r+", "w+"]
+
+mode_equivalents = {
+ "readonly":"r",
+ "copyonwrite":"c",
+ "readwrite":"r+",
+ "write":"w+"
+ }
+
+
+@set_module('numpy')
+class memmap(ndarray):
+ """Create a memory-map to an array stored in a *binary* file on disk.
+
+ Memory-mapped files are used for accessing small segments of large files
+ on disk, without reading the entire file into memory. NumPy's
+ memmap's are array-like objects. This differs from Python's ``mmap``
+ module, which uses file-like objects.
+
+ This subclass of ndarray has some unpleasant interactions with
+ some operations, because it doesn't quite fit properly as a subclass.
+ An alternative to using this subclass is to create the ``mmap``
+ object yourself, then create an ndarray with ndarray.__new__ directly,
+ passing the object created in its 'buffer=' parameter.
+
+ This class may at some point be turned into a factory function
+ which returns a view into an mmap buffer.
+
+ Delete the memmap instance to close the memmap file.
+
+
+ Parameters
+ ----------
+ filename : str, file-like object, or pathlib.Path instance
+ The file name or file object to be used as the array data buffer.
+ dtype : data-type, optional
+ The data-type used to interpret the file contents.
+ Default is `uint8`.
+ mode : {'r+', 'r', 'w+', 'c'}, optional
+ The file is opened in this mode:
+
+ +------+-------------------------------------------------------------+
+ | 'r' | Open existing file for reading only. |
+ +------+-------------------------------------------------------------+
+ | 'r+' | Open existing file for reading and writing. |
+ +------+-------------------------------------------------------------+
+ | 'w+' | Create or overwrite existing file for reading and writing. |
+ +------+-------------------------------------------------------------+
+ | 'c' | Copy-on-write: assignments affect data in memory, but |
+ | | changes are not saved to disk. The file on disk is |
+ | | read-only. |
+ +------+-------------------------------------------------------------+
+
+ Default is 'r+'.
+ offset : int, optional
+ In the file, array data starts at this offset. Since `offset` is
+ measured in bytes, it should normally be a multiple of the byte-size
+ of `dtype`. When ``mode != 'r'``, even positive offsets beyond end of
+ file are valid; the file will be extended to accommodate the
+ additional data. By default, ``memmap`` will start at the beginning of
+ the file, even if ``filename`` is a file pointer ``fp`` and
+ ``fp.tell() != 0``.
+ shape : tuple, optional
+ The desired shape of the array. If ``mode == 'r'`` and the number
+ of remaining bytes after `offset` is not a multiple of the byte-size
+ of `dtype`, you must specify `shape`. By default, the returned array
+ will be 1-D with the number of elements determined by file size
+ and data-type.
+ order : {'C', 'F'}, optional
+ Specify the order of the ndarray memory layout:
+ :term:`row-major`, C-style or :term:`column-major`,
+ Fortran-style. This only has an effect if the shape is
+ greater than 1-D. The default order is 'C'.
+
+ Attributes
+ ----------
+ filename : str or pathlib.Path instance
+ Path to the mapped file.
+ offset : int
+ Offset position in the file.
+ mode : str
+ File mode.
+
+ Methods
+ -------
+ flush
+ Flush any changes in memory to file on disk.
+ When you delete a memmap object, flush is called first to write
+ changes to disk before removing the object.
+
+
+ See also
+ --------
+ lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.
+
+ Notes
+ -----
+ The memmap object can be used anywhere an ndarray is accepted.
+ Given a memmap ``fp``, ``isinstance(fp, numpy.ndarray)`` returns
+ ``True``.
+
+ Memory-mapped files cannot be larger than 2GB on 32-bit systems.
+
+ When a memmap causes a file to be created or extended beyond its
+ current size in the filesystem, the contents of the new part are
+ unspecified. On systems with POSIX filesystem semantics, the extended
+ part will be filled with zero bytes.
+
+ Examples
+ --------
+ >>> data = np.arange(12, dtype='float32')
+ >>> data.resize((3,4))
+
+ This example uses a temporary file so that doctest doesn't write
+ files to your directory. You would use a 'normal' filename.
+
+ >>> from tempfile import mkdtemp
+ >>> import os.path as path
+ >>> filename = path.join(mkdtemp(), 'newfile.dat')
+
+ Create a memmap with dtype and shape that matches our data:
+
+ >>> fp = np.memmap(filename, dtype='float32', mode='w+', shape=(3,4))
+ >>> fp
+ memmap([[ 0., 0., 0., 0.],
+ [ 0., 0., 0., 0.],
+ [ 0., 0., 0., 0.]], dtype=float32)
+
+ Write data to memmap array:
+
+ >>> fp[:] = data[:]
+ >>> fp
+ memmap([[ 0., 1., 2., 3.],
+ [ 4., 5., 6., 7.],
+ [ 8., 9., 10., 11.]], dtype=float32)
+
+ >>> fp.filename == path.abspath(filename)
+ True
+
+ Deletion flushes memory changes to disk before removing the object:
+
+ >>> del fp
+
+ Load the memmap and verify data was stored:
+
+ >>> newfp = np.memmap(filename, dtype='float32', mode='r', shape=(3,4))
+ >>> newfp
+ memmap([[ 0., 1., 2., 3.],
+ [ 4., 5., 6., 7.],
+ [ 8., 9., 10., 11.]], dtype=float32)
+
+ Read-only memmap:
+
+ >>> fpr = np.memmap(filename, dtype='float32', mode='r', shape=(3,4))
+ >>> fpr.flags.writeable
+ False
+
+ Copy-on-write memmap:
+
+ >>> fpc = np.memmap(filename, dtype='float32', mode='c', shape=(3,4))
+ >>> fpc.flags.writeable
+ True
+
+ It's possible to assign to the copy-on-write array, but values are only
+ written into the memory copy of the array, and not written to disk:
+
+ >>> fpc
+ memmap([[ 0., 1., 2., 3.],
+ [ 4., 5., 6., 7.],
+ [ 8., 9., 10., 11.]], dtype=float32)
+ >>> fpc[0,:] = 0
+ >>> fpc
+ memmap([[ 0., 0., 0., 0.],
+ [ 4., 5., 6., 7.],
+ [ 8., 9., 10., 11.]], dtype=float32)
+
+ File on disk is unchanged:
+
+ >>> fpr
+ memmap([[ 0., 1., 2., 3.],
+ [ 4., 5., 6., 7.],
+ [ 8., 9., 10., 11.]], dtype=float32)
+
+ Offset into a memmap:
+
+ >>> fpo = np.memmap(filename, dtype='float32', mode='r', offset=16)
+ >>> fpo
+ memmap([ 4., 5., 6., 7., 8., 9., 10., 11.], dtype=float32)
+
+ """
+
+ __array_priority__ = -100.0
+
+ def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0,
+ shape=None, order='C'):
+ # Import here to minimize 'import numpy' overhead
+ import mmap
+ import os.path
+ try:
+ mode = mode_equivalents[mode]
+ except KeyError:
+ if mode not in valid_filemodes:
+ raise ValueError("mode must be one of %s" %
+ (valid_filemodes + list(mode_equivalents.keys())))
+
+ if mode == 'w+' and shape is None:
+ raise ValueError("shape must be given")
+
+ if hasattr(filename, 'read'):
+ f_ctx = contextlib_nullcontext(filename)
+ else:
+ f_ctx = open(os_fspath(filename), ('r' if mode == 'c' else mode)+'b')
+
+ with f_ctx as fid:
+ fid.seek(0, 2)
+ flen = fid.tell()
+ descr = dtypedescr(dtype)
+ _dbytes = descr.itemsize
+
+ if shape is None:
+ bytes = flen - offset
+ if bytes % _dbytes:
+ raise ValueError("Size of available data is not a "
+ "multiple of the data-type size.")
+ size = bytes // _dbytes
+ shape = (size,)
+ else:
+ if not isinstance(shape, tuple):
+ shape = (shape,)
+ size = np.intp(1) # avoid default choice of np.int_, which might overflow
+ for k in shape:
+ size *= k
+
+ bytes = long(offset + size*_dbytes)
+
+ if mode == 'w+' or (mode == 'r+' and flen < bytes):
+ fid.seek(bytes - 1, 0)
+ fid.write(b'\0')
+ fid.flush()
+
+ if mode == 'c':
+ acc = mmap.ACCESS_COPY
+ elif mode == 'r':
+ acc = mmap.ACCESS_READ
+ else:
+ acc = mmap.ACCESS_WRITE
+
+ start = offset - offset % mmap.ALLOCATIONGRANULARITY
+ bytes -= start
+ array_offset = offset - start
+ mm = mmap.mmap(fid.fileno(), bytes, access=acc, offset=start)
+
+ self = ndarray.__new__(subtype, shape, dtype=descr, buffer=mm,
+ offset=array_offset, order=order)
+ self._mmap = mm
+ self.offset = offset
+ self.mode = mode
+
+ if is_pathlib_path(filename):
+ # special case - if we were constructed with a pathlib.path,
+ # then filename is a path object, not a string
+ self.filename = filename.resolve()
+ elif hasattr(fid, "name") and isinstance(fid.name, basestring):
+ # py3 returns int for TemporaryFile().name
+ self.filename = os.path.abspath(fid.name)
+ # same as memmap copies (e.g. memmap + 1)
+ else:
+ self.filename = None
+
+ return self
+
+ def __array_finalize__(self, obj):
+ if hasattr(obj, '_mmap') and np.may_share_memory(self, obj):
+ self._mmap = obj._mmap
+ self.filename = obj.filename
+ self.offset = obj.offset
+ self.mode = obj.mode
+ else:
+ self._mmap = None
+ self.filename = None
+ self.offset = None
+ self.mode = None
+
+ def flush(self):
+ """
+ Write any changes in the array to the file on disk.
+
+ For further information, see `memmap`.
+
+ Parameters
+ ----------
+ None
+
+ See Also
+ --------
+ memmap
+
+ """
+ if self.base is not None and hasattr(self.base, 'flush'):
+ self.base.flush()
+
+ def __array_wrap__(self, arr, context=None):
+ arr = super(memmap, self).__array_wrap__(arr, context)
+
+ # Return a memmap if a memmap was given as the output of the
+ # ufunc. Leave the arr class unchanged if self is not a memmap
+ # to keep original memmap subclasses behavior
+ if self is arr or type(self) is not memmap:
+ return arr
+ # Return scalar instead of 0d memmap, e.g. for np.sum with
+ # axis=None
+ if arr.shape == ():
+ return arr[()]
+ # Return ndarray otherwise
+ return arr.view(np.ndarray)
+
+ def __getitem__(self, index):
+ res = super(memmap, self).__getitem__(index)
+ if type(res) is memmap and res._mmap is None:
+ return res.view(type=ndarray)
+ return res
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/memmap.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/memmap.pyc
new file mode 100644
index 0000000..917dc40
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/memmap.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/multiarray.py b/project/venv/lib/python2.7/site-packages/numpy/core/multiarray.py
new file mode 100644
index 0000000..7908969
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/multiarray.py
@@ -0,0 +1,1567 @@
+"""
+Create the numpy.core.multiarray namespace for backward compatibility. In v1.16
+the multiarray and umath c-extension modules were merged into a single
+_multiarray_umath extension module. So we replicate the old namespace
+by importing from the extension module.
+
+"""
+
+import functools
+import warnings
+
+from . import overrides
+from . import _multiarray_umath
+import numpy as np
+from numpy.core._multiarray_umath import *
+from numpy.core._multiarray_umath import (
+ _fastCopyAndTranspose, _flagdict, _insert, _reconstruct, _vec_string,
+ _ARRAY_API, _monotonicity
+ )
+
+__all__ = [
+ '_ARRAY_API', 'ALLOW_THREADS', 'BUFSIZE', 'CLIP', 'DATETIMEUNITS',
+ 'ITEM_HASOBJECT', 'ITEM_IS_POINTER', 'LIST_PICKLE', 'MAXDIMS',
+ 'MAY_SHARE_BOUNDS', 'MAY_SHARE_EXACT', 'NEEDS_INIT', 'NEEDS_PYAPI',
+ 'RAISE', 'USE_GETITEM', 'USE_SETITEM', 'WRAP', '_fastCopyAndTranspose',
+ '_flagdict', '_insert', '_reconstruct', '_vec_string', '_monotonicity',
+ 'add_docstring', 'arange', 'array', 'bincount', 'broadcast',
+ 'busday_count', 'busday_offset', 'busdaycalendar', 'can_cast',
+ 'compare_chararrays', 'concatenate', 'copyto', 'correlate', 'correlate2',
+ 'count_nonzero', 'c_einsum', 'datetime_as_string', 'datetime_data',
+ 'digitize', 'dot', 'dragon4_positional', 'dragon4_scientific', 'dtype',
+ 'empty', 'empty_like', 'error', 'flagsobj', 'flatiter', 'format_longfloat',
+ 'frombuffer', 'fromfile', 'fromiter', 'fromstring', 'getbuffer', 'inner',
+ 'int_asbuffer', 'interp', 'interp_complex', 'is_busday', 'lexsort',
+ 'matmul', 'may_share_memory', 'min_scalar_type', 'ndarray', 'nditer',
+ 'nested_iters', 'newbuffer', 'normalize_axis_index', 'packbits',
+ 'promote_types', 'putmask', 'ravel_multi_index', 'result_type', 'scalar',
+ 'set_datetimeparse_function', 'set_legacy_print_mode', 'set_numeric_ops',
+ 'set_string_function', 'set_typeDict', 'shares_memory', 'test_interrupt',
+ 'tracemalloc_domain', 'typeinfo', 'unpackbits', 'unravel_index', 'vdot',
+ 'where', 'zeros']
+
+# For backward compatibility, make sure pickle imports these functions from here
+_reconstruct.__module__ = 'numpy.core.multiarray'
+scalar.__module__ = 'numpy.core.multiarray'
+
+
+arange.__module__ = 'numpy'
+array.__module__ = 'numpy'
+datetime_data.__module__ = 'numpy'
+empty.__module__ = 'numpy'
+frombuffer.__module__ = 'numpy'
+fromfile.__module__ = 'numpy'
+fromiter.__module__ = 'numpy'
+frompyfunc.__module__ = 'numpy'
+fromstring.__module__ = 'numpy'
+geterrobj.__module__ = 'numpy'
+may_share_memory.__module__ = 'numpy'
+nested_iters.__module__ = 'numpy'
+promote_types.__module__ = 'numpy'
+set_numeric_ops.__module__ = 'numpy'
+seterrobj.__module__ = 'numpy'
+zeros.__module__ = 'numpy'
+
+
+# We can't verify dispatcher signatures because NumPy's C functions don't
+# support introspection.
+array_function_from_c_func_and_dispatcher = functools.partial(
+ overrides.array_function_from_dispatcher,
+ module='numpy', docs_from_dispatcher=True, verify=False)
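+
+# Roughly, each decorated function below is only a *dispatcher*: its body
+# returns the arguments to search for __array_function__ overrides, while
+# the C function passed to the decorator remains the implementation. A
+# sketch of the equivalent wiring for empty_like (illustrative only):
+#
+#     def empty_like(prototype, dtype=None, order=None, subok=None):
+#         return (prototype,)
+#     empty_like = overrides.array_function_from_dispatcher(
+#         _multiarray_umath.empty_like, module='numpy',
+#         docs_from_dispatcher=True, verify=False)(empty_like)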
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.empty_like)
+def empty_like(prototype, dtype=None, order=None, subok=None):
+ """
+ empty_like(prototype, dtype=None, order='K', subok=True)
+
+ Return a new array with the same shape and type as a given array.
+
+ Parameters
+ ----------
+ prototype : array_like
+ The shape and data-type of `prototype` define these same attributes
+ of the returned array.
+ dtype : data-type, optional
+ Overrides the data type of the result.
+
+ .. versionadded:: 1.6.0
+ order : {'C', 'F', 'A', or 'K'}, optional
+ Overrides the memory layout of the result. 'C' means C-order,
+ 'F' means F-order, 'A' means 'F' if ``prototype`` is Fortran
+ contiguous, 'C' otherwise. 'K' means match the layout of ``prototype``
+ as closely as possible.
+
+ .. versionadded:: 1.6.0
+ subok : bool, optional.
+ If True, then the newly created array will use the sub-class
+ type of 'a', otherwise it will be a base-class array. Defaults
+ to True.
+
+ Returns
+ -------
+ out : ndarray
+ Array of uninitialized (arbitrary) data with the same
+ shape and type as `prototype`.
+
+ See Also
+ --------
+ ones_like : Return an array of ones with shape and type of input.
+ zeros_like : Return an array of zeros with shape and type of input.
+ full_like : Return a new array with shape of input filled with value.
+ empty : Return a new uninitialized array.
+
+ Notes
+ -----
+ This function does *not* initialize the returned array; to do that use
+ `zeros_like` or `ones_like` instead. It may be marginally faster than
+ the functions that do set the array values.
+
+ Examples
+ --------
+ >>> a = ([1,2,3], [4,5,6]) # a is array-like
+ >>> np.empty_like(a)
+ array([[-1073741821, -1073741821, 3], #random
+ [ 0, 0, -1073741821]])
+ >>> a = np.array([[1., 2., 3.],[4.,5.,6.]])
+ >>> np.empty_like(a)
+ array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000],#random
+ [ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]])
+
+ """
+ return (prototype,)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.concatenate)
+def concatenate(arrays, axis=None, out=None):
+ """
+ concatenate((a1, a2, ...), axis=0, out=None)
+
+ Join a sequence of arrays along an existing axis.
+
+ Parameters
+ ----------
+ a1, a2, ... : sequence of array_like
+ The arrays must have the same shape, except in the dimension
+ corresponding to `axis` (the first, by default).
+ axis : int, optional
+ The axis along which the arrays will be joined. If axis is None,
+ arrays are flattened before use. Default is 0.
+ out : ndarray, optional
+ If provided, the destination to place the result. The shape must be
+ correct, matching that of what concatenate would have returned if no
+ out argument were specified.
+
+ Returns
+ -------
+ res : ndarray
+ The concatenated array.
+
+ See Also
+ --------
+ ma.concatenate : Concatenate function that preserves input masks.
+ array_split : Split an array into multiple sub-arrays of equal or
+ near-equal size.
+ split : Split array into a list of multiple sub-arrays of equal size.
+ hsplit : Split array into multiple sub-arrays horizontally (column wise)
+ vsplit : Split array into multiple sub-arrays vertically (row wise)
+ dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
+ stack : Stack a sequence of arrays along a new axis.
+ hstack : Stack arrays in sequence horizontally (column wise)
+ vstack : Stack arrays in sequence vertically (row wise)
+ dstack : Stack arrays in sequence depth wise (along third dimension)
+ block : Assemble arrays from blocks.
+
+ Notes
+ -----
+ When one or more of the arrays to be concatenated is a MaskedArray,
+ this function will return a MaskedArray object instead of an ndarray,
+ but the input masks are *not* preserved. In cases where a MaskedArray
+ is expected as input, use the ma.concatenate function from the masked
+ array module instead.
+
+ Examples
+ --------
+ >>> a = np.array([[1, 2], [3, 4]])
+ >>> b = np.array([[5, 6]])
+ >>> np.concatenate((a, b), axis=0)
+ array([[1, 2],
+ [3, 4],
+ [5, 6]])
+ >>> np.concatenate((a, b.T), axis=1)
+ array([[1, 2, 5],
+ [3, 4, 6]])
+ >>> np.concatenate((a, b), axis=None)
+ array([1, 2, 3, 4, 5, 6])
+
+ This function will not preserve masking of MaskedArray inputs.
+
+ >>> a = np.ma.arange(3)
+ >>> a[1] = np.ma.masked
+ >>> b = np.arange(2, 5)
+ >>> a
+ masked_array(data=[0, --, 2],
+ mask=[False, True, False],
+ fill_value=999999)
+ >>> b
+ array([2, 3, 4])
+ >>> np.concatenate([a, b])
+ masked_array(data=[0, 1, 2, 2, 3, 4],
+ mask=False,
+ fill_value=999999)
+ >>> np.ma.concatenate([a, b])
+ masked_array(data=[0, --, 2, 2, 3, 4],
+ mask=[False, True, False, False, False, False],
+ fill_value=999999)
+
+ """
+ if out is not None:
+ # optimize for the typical case where only arrays is provided
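+        # `out` may itself carry an __array_function__ override, so it is
+        # included in the relevant arguments returned for dispatch.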
+ arrays = list(arrays)
+ arrays.append(out)
+ return arrays
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.inner)
+def inner(a, b):
+ """
+ inner(a, b)
+
+ Inner product of two arrays.
+
+ Ordinary inner product of vectors for 1-D arrays (without complex
+ conjugation), in higher dimensions a sum product over the last axes.
+
+ Parameters
+ ----------
+ a, b : array_like
+ If `a` and `b` are nonscalar, their last dimensions must match.
+
+ Returns
+ -------
+ out : ndarray
+ `out.shape = a.shape[:-1] + b.shape[:-1]`
+
+ Raises
+ ------
+ ValueError
+ If the last dimension of `a` and `b` has different size.
+
+ See Also
+ --------
+ tensordot : Sum products over arbitrary axes.
+ dot : Generalised matrix product, using second last dimension of `b`.
+ einsum : Einstein summation convention.
+
+ Notes
+ -----
+ For vectors (1-D arrays) it computes the ordinary inner-product::
+
+ np.inner(a, b) = sum(a[:]*b[:])
+
+ More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`::
+
+ np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))
+
+ or explicitly::
+
+ np.inner(a, b)[i0,...,ir-1,j0,...,js-1]
+ = sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:])
+
+ In addition `a` or `b` may be scalars, in which case::
+
+ np.inner(a,b) = a*b
+
+ Examples
+ --------
+ Ordinary inner product for vectors:
+
+ >>> a = np.array([1,2,3])
+ >>> b = np.array([0,1,0])
+ >>> np.inner(a, b)
+ 2
+
+ A multidimensional example:
+
+ >>> a = np.arange(24).reshape((2,3,4))
+ >>> b = np.arange(4)
+ >>> np.inner(a, b)
+ array([[ 14, 38, 62],
+ [ 86, 110, 134]])
+
+ An example where `b` is a scalar:
+
+ >>> np.inner(np.eye(2), 7)
+ array([[ 7., 0.],
+ [ 0., 7.]])
+
+ """
+ return (a, b)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.where)
+def where(condition, x=None, y=None):
+ """
+ where(condition, [x, y])
+
+ Return elements chosen from `x` or `y` depending on `condition`.
+
+ .. note::
+ When only `condition` is provided, this function is a shorthand for
+ ``np.asarray(condition).nonzero()``. Using `nonzero` directly should be
+ preferred, as it behaves correctly for subclasses. The rest of this
+ documentation covers only the case where all three arguments are
+ provided.
+
+ Parameters
+ ----------
+ condition : array_like, bool
+ Where True, yield `x`, otherwise yield `y`.
+ x, y : array_like
+ Values from which to choose. `x`, `y` and `condition` need to be
+ broadcastable to some shape.
+
+ Returns
+ -------
+ out : ndarray
+ An array with elements from `x` where `condition` is True, and elements
+ from `y` elsewhere.
+
+ See Also
+ --------
+ choose
+ nonzero : The function that is called when x and y are omitted
+
+ Notes
+ -----
+ If all the arrays are 1-D, `where` is equivalent to::
+
+ [xv if c else yv
+ for c, xv, yv in zip(condition, x, y)]
+
+ Examples
+ --------
+ >>> a = np.arange(10)
+ >>> a
+ array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+ >>> np.where(a < 5, a, 10*a)
+ array([ 0, 1, 2, 3, 4, 50, 60, 70, 80, 90])
+
+ This can be used on multidimensional arrays too:
+
+ >>> np.where([[True, False], [True, True]],
+ ... [[1, 2], [3, 4]],
+ ... [[9, 8], [7, 6]])
+ array([[1, 8],
+ [3, 4]])
+
+ The shapes of x, y, and the condition are broadcast together:
+
+ >>> x, y = np.ogrid[:3, :4]
+ >>> np.where(x < y, x, 10 + y) # both x and 10+y are broadcast
+ array([[10, 0, 0, 0],
+ [10, 11, 1, 1],
+ [10, 11, 12, 2]])
+
+ >>> a = np.array([[0, 1, 2],
+ ... [0, 2, 4],
+ ... [0, 3, 6]])
+ >>> np.where(a < 4, a, -1) # -1 is broadcast
+ array([[ 0, 1, 2],
+ [ 0, 2, -1],
+ [ 0, 3, -1]])
+ """
+ return (condition, x, y)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.lexsort)
+def lexsort(keys, axis=None):
+ """
+ lexsort(keys, axis=-1)
+
+ Perform an indirect stable sort using a sequence of keys.
+
+ Given multiple sorting keys, which can be interpreted as columns in a
+ spreadsheet, lexsort returns an array of integer indices that describes
+ the sort order by multiple columns. The last key in the sequence is used
+ for the primary sort order, the second-to-last key for the secondary sort
+ order, and so on. The keys argument must be a sequence of objects that
+ can be converted to arrays of the same shape. If a 2D array is provided
+    for the keys argument, its rows are interpreted as the sorting keys and
+    sorting is according to the last row, second-to-last row, etc.
+
+ Parameters
+ ----------
+ keys : (k, N) array or tuple containing k (N,)-shaped sequences
+ The `k` different "columns" to be sorted. The last column (or row if
+ `keys` is a 2D array) is the primary sort key.
+ axis : int, optional
+ Axis to be indirectly sorted. By default, sort over the last axis.
+
+ Returns
+ -------
+ indices : (N,) ndarray of ints
+ Array of indices that sort the keys along the specified axis.
+
+ See Also
+ --------
+ argsort : Indirect sort.
+ ndarray.sort : In-place sort.
+ sort : Return a sorted copy of an array.
+
+ Examples
+ --------
+ Sort names: first by surname, then by name.
+
+ >>> surnames = ('Hertz', 'Galilei', 'Hertz')
+ >>> first_names = ('Heinrich', 'Galileo', 'Gustav')
+ >>> ind = np.lexsort((first_names, surnames))
+ >>> ind
+ array([1, 2, 0])
+
+ >>> [surnames[i] + ", " + first_names[i] for i in ind]
+ ['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich']
+
+ Sort two columns of numbers:
+
+ >>> a = [1,5,1,4,3,4,4] # First column
+ >>> b = [9,4,0,4,0,2,1] # Second column
+ >>> ind = np.lexsort((b,a)) # Sort by a, then by b
+ >>> print(ind)
+ [2 0 4 6 5 3 1]
+
+ >>> [(a[i],b[i]) for i in ind]
+ [(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)]
+
+ Note that sorting is first according to the elements of ``a``.
+ Secondary sorting is according to the elements of ``b``.
+
+ A normal ``argsort`` would have yielded:
+
+ >>> [(a[i],b[i]) for i in np.argsort(a)]
+ [(1, 9), (1, 0), (3, 0), (4, 4), (4, 2), (4, 1), (5, 4)]
+
+ Structured arrays are sorted lexically by ``argsort``:
+
+ >>> x = np.array([(1,9), (5,4), (1,0), (4,4), (3,0), (4,2), (4,1)],
+ ... dtype=np.dtype([('x', int), ('y', int)]))
+
+ >>> np.argsort(x) # or np.argsort(x, order=('x', 'y'))
+ array([2, 0, 4, 6, 5, 3, 1])
+
+ """
+ if isinstance(keys, tuple):
+ return keys
+ else:
+ return (keys,)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.can_cast)
+def can_cast(from_, to, casting=None):
+ """
+ can_cast(from_, to, casting='safe')
+
+ Returns True if cast between data types can occur according to the
+ casting rule. If from is a scalar or array scalar, also returns
+ True if the scalar value can be cast without overflow or truncation
+ to an integer.
+
+ Parameters
+ ----------
+ from_ : dtype, dtype specifier, scalar, or array
+ Data type, scalar, or array to cast from.
+ to : dtype or dtype specifier
+ Data type to cast to.
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+ Controls what kind of data casting may occur.
+
+ * 'no' means the data types should not be cast at all.
+ * 'equiv' means only byte-order changes are allowed.
+ * 'safe' means only casts which can preserve values are allowed.
+ * 'same_kind' means only safe casts or casts within a kind,
+ like float64 to float32, are allowed.
+ * 'unsafe' means any data conversions may be done.
+
+ Returns
+ -------
+ out : bool
+ True if cast can occur according to the casting rule.
+
+ Notes
+ -----
+    Starting in NumPy 1.9, the can_cast function returns False in 'safe'
+    casting mode when casting between an integer/float dtype and a string
+    dtype whose length is not long enough to store the maximum integer/float
+    value converted to a string. Previously, can_cast in 'safe' mode returned
+    True for an integer/float dtype and a string dtype of any length.
+
+ See also
+ --------
+ dtype, result_type
+
+ Examples
+ --------
+ Basic examples
+
+ >>> np.can_cast(np.int32, np.int64)
+ True
+ >>> np.can_cast(np.float64, complex)
+ True
+ >>> np.can_cast(complex, float)
+ False
+
+ >>> np.can_cast('i8', 'f8')
+ True
+ >>> np.can_cast('i8', 'f4')
+ False
+ >>> np.can_cast('i4', 'S4')
+ False
+
+ Casting scalars
+
+ >>> np.can_cast(100, 'i1')
+ True
+ >>> np.can_cast(150, 'i1')
+ False
+ >>> np.can_cast(150, 'u1')
+ True
+
+ >>> np.can_cast(3.5e100, np.float32)
+ False
+ >>> np.can_cast(1000.0, np.float32)
+ True
+
+ Array scalar checks the value, array does not
+
+ >>> np.can_cast(np.array(1000.0), np.float32)
+ True
+ >>> np.can_cast(np.array([1000.0]), np.float32)
+ False
+
+ Using the casting rules
+
+    >>> np.can_cast('<i8', '<i8', 'no')
+    True
+    >>> np.can_cast('<i8', '>i8', 'no')
+    False
+
+    >>> np.can_cast('<i8', '>i8', 'equiv')
+    True
+    >>> np.can_cast('<i4', '>i8', 'equiv')
+    False
+
+    >>> np.can_cast('<i4', '>i8', 'safe')
+    True
+    >>> np.can_cast('<i8', '>i4', 'safe')
+    False
+
+    >>> np.can_cast('<i8', '>i4', 'same_kind')
+    True
+    >>> np.can_cast('<i8', '>u4', 'same_kind')
+    False
+
+    >>> np.can_cast('<i8', '>u4', 'unsafe')
+    True
+
+ """
+ return (from_,)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.min_scalar_type)
+def min_scalar_type(a):
+ """
+ min_scalar_type(a)
+
+ For scalar ``a``, returns the data type with the smallest size
+ and smallest scalar kind which can hold its value. For non-scalar
+ array ``a``, returns the vector's dtype unmodified.
+
+ Floating point values are not demoted to integers,
+ and complex values are not demoted to floats.
+
+ Parameters
+ ----------
+ a : scalar or array_like
+ The value whose minimal data type is to be found.
+
+ Returns
+ -------
+ out : dtype
+ The minimal data type.
+
+ Notes
+ -----
+ .. versionadded:: 1.6.0
+
+ See Also
+ --------
+ result_type, promote_types, dtype, can_cast
+
+ Examples
+ --------
+ >>> np.min_scalar_type(10)
+ dtype('uint8')
+
+ >>> np.min_scalar_type(-260)
+ dtype('int16')
+
+ >>> np.min_scalar_type(3.1)
+ dtype('float16')
+
+ >>> np.min_scalar_type(1e50)
+ dtype('float64')
+
+ >>> np.min_scalar_type(np.arange(4,dtype='f8'))
+ dtype('float64')
+
+ """
+ return (a,)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.result_type)
+def result_type(*arrays_and_dtypes):
+ """
+ result_type(*arrays_and_dtypes)
+
+ Returns the type that results from applying the NumPy
+ type promotion rules to the arguments.
+
+ Type promotion in NumPy works similarly to the rules in languages
+ like C++, with some slight differences. When both scalars and
+ arrays are used, the array's type takes precedence and the actual value
+ of the scalar is taken into account.
+
+ For example, calculating 3*a, where a is an array of 32-bit floats,
+ intuitively should result in a 32-bit float output. If the 3 is a
+ 32-bit integer, the NumPy rules indicate it can't convert losslessly
+ into a 32-bit float, so a 64-bit float should be the result type.
+ By examining the value of the constant, '3', we see that it fits in
+ an 8-bit integer, which can be cast losslessly into the 32-bit float.
+
+ Parameters
+ ----------
+ arrays_and_dtypes : list of arrays and dtypes
+ The operands of some operation whose result type is needed.
+
+ Returns
+ -------
+ out : dtype
+ The result type.
+
+ See also
+ --------
+ dtype, promote_types, min_scalar_type, can_cast
+
+ Notes
+ -----
+ .. versionadded:: 1.6.0
+
+ The specific algorithm used is as follows.
+
+ Categories are determined by first checking which of boolean,
+ integer (int/uint), or floating point (float/complex) the maximum
+ kind of all the arrays and the scalars are.
+
+ If there are only scalars or the maximum category of the scalars
+ is higher than the maximum category of the arrays,
+ the data types are combined with :func:`promote_types`
+ to produce the return value.
+
+ Otherwise, `min_scalar_type` is called on each array, and
+ the resulting data types are all combined with :func:`promote_types`
+ to produce the return value.
+
+ The set of int values is not a subset of the uint values for types
+ with the same number of bits, something not reflected in
+ :func:`min_scalar_type`, but handled as a special case in `result_type`.
+
+ Examples
+ --------
+ >>> np.result_type(3, np.arange(7, dtype='i1'))
+ dtype('int8')
+
+ >>> np.result_type('i4', 'c8')
+ dtype('complex128')
+
+ >>> np.result_type(3.0, -2)
+ dtype('float64')
+
+ """
+ return arrays_and_dtypes
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.dot)
+def dot(a, b, out=None):
+ """
+ dot(a, b, out=None)
+
+ Dot product of two arrays. Specifically,
+
+ - If both `a` and `b` are 1-D arrays, it is inner product of vectors
+ (without complex conjugation).
+
+ - If both `a` and `b` are 2-D arrays, it is matrix multiplication,
+ but using :func:`matmul` or ``a @ b`` is preferred.
+
+ - If either `a` or `b` is 0-D (scalar), it is equivalent to :func:`multiply`
+ and using ``numpy.multiply(a, b)`` or ``a * b`` is preferred.
+
+ - If `a` is an N-D array and `b` is a 1-D array, it is a sum product over
+ the last axis of `a` and `b`.
+
+ - If `a` is an N-D array and `b` is an M-D array (where ``M>=2``), it is a
+ sum product over the last axis of `a` and the second-to-last axis of `b`::
+
+ dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])
+
+ Parameters
+ ----------
+ a : array_like
+ First argument.
+ b : array_like
+ Second argument.
+ out : ndarray, optional
+ Output argument. This must have the exact kind that would be returned
+ if it was not used. In particular, it must have the right type, must be
+ C-contiguous, and its dtype must be the dtype that would be returned
+ for `dot(a,b)`. This is a performance feature. Therefore, if these
+ conditions are not met, an exception is raised, instead of attempting
+ to be flexible.
+
+ Returns
+ -------
+ output : ndarray
+ Returns the dot product of `a` and `b`. If `a` and `b` are both
+ scalars or both 1-D arrays then a scalar is returned; otherwise
+ an array is returned.
+ If `out` is given, then it is returned.
+
+ Raises
+ ------
+ ValueError
+ If the last dimension of `a` is not the same size as
+ the second-to-last dimension of `b`.
+
+ See Also
+ --------
+ vdot : Complex-conjugating dot product.
+ tensordot : Sum products over arbitrary axes.
+ einsum : Einstein summation convention.
+ matmul : '@' operator as method with out parameter.
+
+ Examples
+ --------
+ >>> np.dot(3, 4)
+ 12
+
+ Neither argument is complex-conjugated:
+
+ >>> np.dot([2j, 3j], [2j, 3j])
+ (-13+0j)
+
+ For 2-D arrays it is the matrix product:
+
+ >>> a = [[1, 0], [0, 1]]
+ >>> b = [[4, 1], [2, 2]]
+ >>> np.dot(a, b)
+ array([[4, 1],
+ [2, 2]])
+
+ >>> a = np.arange(3*4*5*6).reshape((3,4,5,6))
+ >>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3))
+ >>> np.dot(a, b)[2,3,2,1,2,2]
+ 499128
+ >>> sum(a[2,3,2,:] * b[1,2,:,2])
+ 499128
+
+ """
+ return (a, b, out)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.vdot)
+def vdot(a, b):
+ """
+ vdot(a, b)
+
+ Return the dot product of two vectors.
+
+ The vdot(`a`, `b`) function handles complex numbers differently than
+ dot(`a`, `b`). If the first argument is complex the complex conjugate
+ of the first argument is used for the calculation of the dot product.
+
+ Note that `vdot` handles multidimensional arrays differently than `dot`:
+ it does *not* perform a matrix product, but flattens input arguments
+ to 1-D vectors first. Consequently, it should only be used for vectors.
+
+ Parameters
+ ----------
+ a : array_like
+ If `a` is complex the complex conjugate is taken before calculation
+ of the dot product.
+ b : array_like
+ Second argument to the dot product.
+
+ Returns
+ -------
+ output : ndarray
+ Dot product of `a` and `b`. Can be an int, float, or
+ complex depending on the types of `a` and `b`.
+
+ See Also
+ --------
+ dot : Return the dot product without using the complex conjugate of the
+ first argument.
+
+ Examples
+ --------
+ >>> a = np.array([1+2j,3+4j])
+ >>> b = np.array([5+6j,7+8j])
+ >>> np.vdot(a, b)
+ (70-8j)
+ >>> np.vdot(b, a)
+ (70+8j)
+
+ Note that higher-dimensional arrays are flattened!
+
+ >>> a = np.array([[1, 4], [5, 6]])
+ >>> b = np.array([[4, 1], [2, 2]])
+ >>> np.vdot(a, b)
+ 30
+ >>> np.vdot(b, a)
+ 30
+ >>> 1*4 + 4*1 + 5*2 + 6*2
+ 30
+
+ """
+ return (a, b)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.bincount)
+def bincount(x, weights=None, minlength=None):
+ """
+ bincount(x, weights=None, minlength=0)
+
+ Count number of occurrences of each value in array of non-negative ints.
+
+ The number of bins (of size 1) is one larger than the largest value in
+ `x`. If `minlength` is specified, there will be at least this number
+ of bins in the output array (though it will be longer if necessary,
+ depending on the contents of `x`).
+ Each bin gives the number of occurrences of its index value in `x`.
+ If `weights` is specified the input array is weighted by it, i.e. if a
+ value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead
+ of ``out[n] += 1``.
+
+ Parameters
+ ----------
+ x : array_like, 1 dimension, nonnegative ints
+ Input array.
+ weights : array_like, optional
+ Weights, array of the same shape as `x`.
+ minlength : int, optional
+ A minimum number of bins for the output array.
+
+ .. versionadded:: 1.6.0
+
+ Returns
+ -------
+ out : ndarray of ints
+ The result of binning the input array.
+ The length of `out` is equal to ``np.amax(x)+1``.
+
+ Raises
+ ------
+ ValueError
+ If the input is not 1-dimensional, or contains elements with negative
+ values, or if `minlength` is negative.
+ TypeError
+ If the type of the input is float or complex.
+
+ See Also
+ --------
+ histogram, digitize, unique
+
+ Examples
+ --------
+ >>> np.bincount(np.arange(5))
+ array([1, 1, 1, 1, 1])
+ >>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))
+ array([1, 3, 1, 1, 0, 0, 0, 1])
+
+ >>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23])
+ >>> np.bincount(x).size == np.amax(x)+1
+ True
+
+ The input array needs to be of integer dtype, otherwise a
+ TypeError is raised:
+
+ >>> np.bincount(np.arange(5, dtype=float))
+ Traceback (most recent call last):
+ File "", line 1, in
+ TypeError: array cannot be safely cast to required type
+
+ A possible use of ``bincount`` is to perform sums over
+ variable-size chunks of an array, using the ``weights`` keyword.
+
+ >>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
+ >>> x = np.array([0, 1, 1, 2, 2, 2])
+ >>> np.bincount(x, weights=w)
+ array([ 0.3, 0.7, 1.1])
+
+ """
+ return (x, weights)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.ravel_multi_index)
+def ravel_multi_index(multi_index, dims, mode=None, order=None):
+ """
+ ravel_multi_index(multi_index, dims, mode='raise', order='C')
+
+ Converts a tuple of index arrays into an array of flat
+ indices, applying boundary modes to the multi-index.
+
+ Parameters
+ ----------
+ multi_index : tuple of array_like
+ A tuple of integer arrays, one array for each dimension.
+ dims : tuple of ints
+ The shape of array into which the indices from ``multi_index`` apply.
+ mode : {'raise', 'wrap', 'clip'}, optional
+ Specifies how out-of-bounds indices are handled. Can specify
+ either one mode or a tuple of modes, one mode per index.
+
+ * 'raise' -- raise an error (default)
+ * 'wrap' -- wrap around
+ * 'clip' -- clip to the range
+
+ In 'clip' mode, a negative index which would normally
+ wrap will clip to 0 instead.
+ order : {'C', 'F'}, optional
+ Determines whether the multi-index should be viewed as
+ indexing in row-major (C-style) or column-major
+ (Fortran-style) order.
+
+ Returns
+ -------
+ raveled_indices : ndarray
+ An array of indices into the flattened version of an array
+ of dimensions ``dims``.
+
+ See Also
+ --------
+ unravel_index
+
+ Notes
+ -----
+ .. versionadded:: 1.6.0
+
+ Examples
+ --------
+ >>> arr = np.array([[3,6,6],[4,5,1]])
+ >>> np.ravel_multi_index(arr, (7,6))
+ array([22, 41, 37])
+ >>> np.ravel_multi_index(arr, (7,6), order='F')
+ array([31, 41, 13])
+ >>> np.ravel_multi_index(arr, (4,6), mode='clip')
+ array([22, 23, 19])
+ >>> np.ravel_multi_index(arr, (4,4), mode=('clip','wrap'))
+ array([12, 13, 13])
+
+ >>> np.ravel_multi_index((3,1,4,1), (6,7,8,9))
+ 1621
+ """
+ return multi_index
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.unravel_index)
+def unravel_index(indices, shape=None, order=None, dims=None):
+ """
+ unravel_index(indices, shape, order='C')
+
+ Converts a flat index or array of flat indices into a tuple
+ of coordinate arrays.
+
+ Parameters
+ ----------
+ indices : array_like
+ An integer array whose elements are indices into the flattened
+ version of an array of dimensions ``shape``. Before version 1.6.0,
+ this function accepted just one index value.
+ shape : tuple of ints
+ The shape of the array to use for unraveling ``indices``.
+
+ .. versionchanged:: 1.16.0
+ Renamed from ``dims`` to ``shape``.
+
+ order : {'C', 'F'}, optional
+ Determines whether the indices should be viewed as indexing in
+ row-major (C-style) or column-major (Fortran-style) order.
+
+ .. versionadded:: 1.6.0
+
+ Returns
+ -------
+ unraveled_coords : tuple of ndarray
+ Each array in the tuple has the same shape as the ``indices``
+ array.
+
+ See Also
+ --------
+ ravel_multi_index
+
+ Examples
+ --------
+ >>> np.unravel_index([22, 41, 37], (7,6))
+ (array([3, 6, 6]), array([4, 5, 1]))
+ >>> np.unravel_index([31, 41, 13], (7,6), order='F')
+ (array([3, 6, 6]), array([4, 5, 1]))
+
+ >>> np.unravel_index(1621, (6,7,8,9))
+ (3, 1, 4, 1)
+
+ """
+ if dims is not None:
+ warnings.warn("'shape' argument should be used instead of 'dims'",
+ DeprecationWarning, stacklevel=3)
+ return (indices,)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.copyto)
+def copyto(dst, src, casting=None, where=None):
+ """
+ copyto(dst, src, casting='same_kind', where=True)
+
+ Copies values from one array to another, broadcasting as necessary.
+
+ Raises a TypeError if the `casting` rule is violated, and if
+ `where` is provided, it selects which elements to copy.
+
+ .. versionadded:: 1.7.0
+
+ Parameters
+ ----------
+ dst : ndarray
+ The array into which values are copied.
+ src : array_like
+ The array from which values are copied.
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+ Controls what kind of data casting may occur when copying.
+
+ * 'no' means the data types should not be cast at all.
+ * 'equiv' means only byte-order changes are allowed.
+ * 'safe' means only casts which can preserve values are allowed.
+ * 'same_kind' means only safe casts or casts within a kind,
+ like float64 to float32, are allowed.
+ * 'unsafe' means any data conversions may be done.
+ where : array_like of bool, optional
+ A boolean array which is broadcasted to match the dimensions
+ of `dst`, and selects elements to copy from `src` to `dst`
+ wherever it contains the value True.
+ """
+ return (dst, src, where)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.putmask)
+def putmask(a, mask, values):
+ """
+ putmask(a, mask, values)
+
+ Changes elements of an array based on conditional and input values.
+
+ Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``.
+
+ If `values` is not the same size as `a` and `mask` then it will repeat.
+ This gives behavior different from ``a[mask] = values``.
+
+ Parameters
+ ----------
+ a : array_like
+ Target array.
+ mask : array_like
+ Boolean mask array. It has to be the same shape as `a`.
+ values : array_like
+ Values to put into `a` where `mask` is True. If `values` is smaller
+ than `a` it will be repeated.
+
+ See Also
+ --------
+ place, put, take, copyto
+
+ Examples
+ --------
+ >>> x = np.arange(6).reshape(2, 3)
+ >>> np.putmask(x, x>2, x**2)
+ >>> x
+ array([[ 0, 1, 2],
+ [ 9, 16, 25]])
+
+ If `values` is smaller than `a` it is repeated:
+
+ >>> x = np.arange(5)
+ >>> np.putmask(x, x>1, [-33, -44])
+ >>> x
+ array([ 0, 1, -33, -44, -33])
+
+ """
+ return (a, mask, values)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.packbits)
+def packbits(myarray, axis=None):
+ """
+ packbits(myarray, axis=None)
+
+ Packs the elements of a binary-valued array into bits in a uint8 array.
+
+ The result is padded to full bytes by inserting zero bits at the end.
+
+ Parameters
+ ----------
+ myarray : array_like
+ An array of integers or booleans whose elements should be packed to
+ bits.
+ axis : int, optional
+ The dimension over which bit-packing is done.
+ ``None`` implies packing the flattened array.
+
+ Returns
+ -------
+ packed : ndarray
+ Array of type uint8 whose elements represent bits corresponding to the
+ logical (0 or nonzero) value of the input elements. The shape of
+ `packed` has the same number of dimensions as the input (unless `axis`
+ is None, in which case the output is 1-D).
+
+ See Also
+ --------
+ unpackbits: Unpacks elements of a uint8 array into a binary-valued output
+ array.
+
+ Examples
+ --------
+ >>> a = np.array([[[1,0,1],
+ ... [0,1,0]],
+ ... [[1,1,0],
+ ... [0,0,1]]])
+ >>> b = np.packbits(a, axis=-1)
+ >>> b
+ array([[[160],[64]],[[192],[32]]], dtype=uint8)
+
+ Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000,
+ and 32 = 0010 0000.
+
+ """
+ return (myarray,)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.unpackbits)
+def unpackbits(myarray, axis=None):
+ """
+ unpackbits(myarray, axis=None)
+
+ Unpacks elements of a uint8 array into a binary-valued output array.
+
+ Each element of `myarray` represents a bit-field that should be unpacked
+ into a binary-valued output array. The shape of the output array is either
+ 1-D (if `axis` is None) or the same shape as the input array with unpacking
+ done along the axis specified.
+
+ Parameters
+ ----------
+ myarray : ndarray, uint8 type
+ Input array.
+ axis : int, optional
+ The dimension over which bit-unpacking is done.
+ ``None`` implies unpacking the flattened array.
+
+ Returns
+ -------
+ unpacked : ndarray, uint8 type
+ The elements are binary-valued (0 or 1).
+
+ See Also
+ --------
+ packbits : Packs the elements of a binary-valued array into bits in a uint8
+ array.
+
+ Examples
+ --------
+ >>> a = np.array([[2], [7], [23]], dtype=np.uint8)
+ >>> a
+ array([[ 2],
+ [ 7],
+ [23]], dtype=uint8)
+ >>> b = np.unpackbits(a, axis=1)
+ >>> b
+ array([[0, 0, 0, 0, 0, 0, 1, 0],
+ [0, 0, 0, 0, 0, 1, 1, 1],
+ [0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8)
+
+ """
+ return (myarray,)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.shares_memory)
+def shares_memory(a, b, max_work=None):
+ """
+ shares_memory(a, b, max_work=None)
+
+ Determine if two arrays share memory
+
+ Parameters
+ ----------
+ a, b : ndarray
+ Input arrays
+ max_work : int, optional
+ Effort to spend on solving the overlap problem (maximum number
+ of candidate solutions to consider). The following special
+ values are recognized:
+
+ max_work=MAY_SHARE_EXACT (default)
+ The problem is solved exactly. In this case, the function returns
+ True only if there is an element shared between the arrays.
+ max_work=MAY_SHARE_BOUNDS
+ Only the memory bounds of a and b are checked.
+
+ Raises
+ ------
+ numpy.TooHardError
+ Exceeded max_work.
+
+ Returns
+ -------
+ out : bool
+
+ See Also
+ --------
+ may_share_memory
+
+ Examples
+ --------
+    >>> np.shares_memory(np.array([1,2]), np.array([5,8,9]))
+ False
+
+ """
+ return (a, b)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.may_share_memory)
+def may_share_memory(a, b, max_work=None):
+ """
+ may_share_memory(a, b, max_work=None)
+
+ Determine if two arrays might share memory
+
+ A return of True does not necessarily mean that the two arrays
+ share any element. It just means that they *might*.
+
+ Only the memory bounds of a and b are checked by default.
+
+ Parameters
+ ----------
+ a, b : ndarray
+ Input arrays
+ max_work : int, optional
+ Effort to spend on solving the overlap problem. See
+ `shares_memory` for details. Default for ``may_share_memory``
+ is to do a bounds check.
+
+ Returns
+ -------
+ out : bool
+
+ See Also
+ --------
+ shares_memory
+
+ Examples
+ --------
+ >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
+ False
+ >>> x = np.zeros([3, 4])
+ >>> np.may_share_memory(x[:,0], x[:,1])
+ True
+
+ """
+ return (a, b)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.is_busday)
+def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None):
+ """
+ is_busday(dates, weekmask='1111100', holidays=None, busdaycal=None, out=None)
+
+ Calculates which of the given dates are valid days, and which are not.
+
+ .. versionadded:: 1.7.0
+
+ Parameters
+ ----------
+ dates : array_like of datetime64[D]
+ The array of dates to process.
+ weekmask : str or array_like of bool, optional
+ A seven-element array indicating which of Monday through Sunday are
+ valid days. May be specified as a length-seven list or array, like
+ [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
+ like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
+ weekdays, optionally separated by white space. Valid abbreviations
+ are: Mon Tue Wed Thu Fri Sat Sun
+ holidays : array_like of datetime64[D], optional
+ An array of dates to consider as invalid dates. They may be
+ specified in any order, and NaT (not-a-time) dates are ignored.
+ This list is saved in a normalized form that is suited for
+ fast calculations of valid days.
+ busdaycal : busdaycalendar, optional
+ A `busdaycalendar` object which specifies the valid days. If this
+ parameter is provided, neither weekmask nor holidays may be
+ provided.
+ out : array of bool, optional
+ If provided, this array is filled with the result.
+
+ Returns
+ -------
+ out : array of bool
+ An array with the same shape as ``dates``, containing True for
+ each valid day, and False for each invalid day.
+
+ See Also
+ --------
+ busdaycalendar: An object that specifies a custom set of valid days.
+ busday_offset : Applies an offset counted in valid days.
+ busday_count : Counts how many valid days are in a half-open date range.
+
+ Examples
+ --------
+ >>> # The weekdays are Friday, Saturday, and Monday
+ ... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'],
+ ... holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
+ array([False, False, True], dtype='bool')
+ """
+ return (dates, weekmask, holidays, out)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_offset)
+def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None,
+ busdaycal=None, out=None):
+ """
+ busday_offset(dates, offsets, roll='raise', weekmask='1111100', holidays=None, busdaycal=None, out=None)
+
+ First adjusts the date to fall on a valid day according to
+ the ``roll`` rule, then applies offsets to the given dates
+ counted in valid days.
+
+ .. versionadded:: 1.7.0
+
+ Parameters
+ ----------
+ dates : array_like of datetime64[D]
+ The array of dates to process.
+ offsets : array_like of int
+ The array of offsets, which is broadcast with ``dates``.
+ roll : {'raise', 'nat', 'forward', 'following', 'backward', 'preceding', 'modifiedfollowing', 'modifiedpreceding'}, optional
+ How to treat dates that do not fall on a valid day. The default
+ is 'raise'.
+
+ * 'raise' means to raise an exception for an invalid day.
+ * 'nat' means to return a NaT (not-a-time) for an invalid day.
+ * 'forward' and 'following' mean to take the first valid day
+ later in time.
+ * 'backward' and 'preceding' mean to take the first valid day
+ earlier in time.
+ * 'modifiedfollowing' means to take the first valid day
+ later in time unless it is across a Month boundary, in which
+ case to take the first valid day earlier in time.
+ * 'modifiedpreceding' means to take the first valid day
+ earlier in time unless it is across a Month boundary, in which
+ case to take the first valid day later in time.
+ weekmask : str or array_like of bool, optional
+ A seven-element array indicating which of Monday through Sunday are
+ valid days. May be specified as a length-seven list or array, like
+ [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
+ like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
+ weekdays, optionally separated by white space. Valid abbreviations
+ are: Mon Tue Wed Thu Fri Sat Sun
+ holidays : array_like of datetime64[D], optional
+ An array of dates to consider as invalid dates. They may be
+ specified in any order, and NaT (not-a-time) dates are ignored.
+ This list is saved in a normalized form that is suited for
+ fast calculations of valid days.
+ busdaycal : busdaycalendar, optional
+ A `busdaycalendar` object which specifies the valid days. If this
+ parameter is provided, neither weekmask nor holidays may be
+ provided.
+ out : array of datetime64[D], optional
+ If provided, this array is filled with the result.
+
+ Returns
+ -------
+ out : array of datetime64[D]
+ An array with a shape from broadcasting ``dates`` and ``offsets``
+ together, containing the dates with offsets applied.
+
+ See Also
+ --------
+ busdaycalendar: An object that specifies a custom set of valid days.
+ is_busday : Returns a boolean array indicating valid days.
+ busday_count : Counts how many valid days are in a half-open date range.
+
+ Examples
+ --------
+ >>> # First business day in October 2011 (not accounting for holidays)
+ ... np.busday_offset('2011-10', 0, roll='forward')
+ numpy.datetime64('2011-10-03','D')
+ >>> # Last business day in February 2012 (not accounting for holidays)
+ ... np.busday_offset('2012-03', -1, roll='forward')
+ numpy.datetime64('2012-02-29','D')
+ >>> # Third Wednesday in January 2011
+ ... np.busday_offset('2011-01', 2, roll='forward', weekmask='Wed')
+ numpy.datetime64('2011-01-19','D')
+ >>> # 2012 Mother's Day in Canada and the U.S.
+ ... np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun')
+ numpy.datetime64('2012-05-13','D')
+
+ >>> # First business day on or after a date
+ ... np.busday_offset('2011-03-20', 0, roll='forward')
+ numpy.datetime64('2011-03-21','D')
+ >>> np.busday_offset('2011-03-22', 0, roll='forward')
+ numpy.datetime64('2011-03-22','D')
+ >>> # First business day after a date
+ ... np.busday_offset('2011-03-20', 1, roll='backward')
+ numpy.datetime64('2011-03-21','D')
+ >>> np.busday_offset('2011-03-22', 1, roll='backward')
+ numpy.datetime64('2011-03-23','D')
+ """
+ return (dates, offsets, weekmask, holidays, out)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_count)
+def busday_count(begindates, enddates, weekmask=None, holidays=None,
+ busdaycal=None, out=None):
+ """
+ busday_count(begindates, enddates, weekmask='1111100', holidays=[], busdaycal=None, out=None)
+
+ Counts the number of valid days between `begindates` and
+ `enddates`, not including the day of `enddates`.
+
+ If ``enddates`` specifies a date value that is earlier than the
+ corresponding ``begindates`` date value, the count will be negative.
+
+ .. versionadded:: 1.7.0
+
+ Parameters
+ ----------
+ begindates : array_like of datetime64[D]
+ The array of the first dates for counting.
+ enddates : array_like of datetime64[D]
+ The array of the end dates for counting, which are excluded
+ from the count themselves.
+ weekmask : str or array_like of bool, optional
+ A seven-element array indicating which of Monday through Sunday are
+ valid days. May be specified as a length-seven list or array, like
+ [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
+ like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
+ weekdays, optionally separated by white space. Valid abbreviations
+ are: Mon Tue Wed Thu Fri Sat Sun
+ holidays : array_like of datetime64[D], optional
+ An array of dates to consider as invalid dates. They may be
+ specified in any order, and NaT (not-a-time) dates are ignored.
+ This list is saved in a normalized form that is suited for
+ fast calculations of valid days.
+ busdaycal : busdaycalendar, optional
+ A `busdaycalendar` object which specifies the valid days. If this
+ parameter is provided, neither weekmask nor holidays may be
+ provided.
+ out : array of int, optional
+ If provided, this array is filled with the result.
+
+ Returns
+ -------
+ out : array of int
+ An array with a shape from broadcasting ``begindates`` and ``enddates``
+ together, containing the number of valid days between
+ the begin and end dates.
+
+ See Also
+ --------
+ busdaycalendar: An object that specifies a custom set of valid days.
+ is_busday : Returns a boolean array indicating valid days.
+ busday_offset : Applies an offset counted in valid days.
+
+ Examples
+ --------
+ >>> # Number of weekdays in January 2011
+ ... np.busday_count('2011-01', '2011-02')
+ 21
+ >>> # Number of weekdays in 2011
+ ... np.busday_count('2011', '2012')
+ 260
+ >>> # Number of Saturdays in 2011
+ ... np.busday_count('2011', '2012', weekmask='Sat')
+ 53
+ """
+ return (begindates, enddates, weekmask, holidays, out)
+
+
+@array_function_from_c_func_and_dispatcher(
+ _multiarray_umath.datetime_as_string)
+def datetime_as_string(arr, unit=None, timezone=None, casting=None):
+ """
+ datetime_as_string(arr, unit=None, timezone='naive', casting='same_kind')
+
+ Convert an array of datetimes into an array of strings.
+
+ Parameters
+ ----------
+ arr : array_like of datetime64
+ The array of UTC timestamps to format.
+ unit : str
+        One of None, 'auto', or a :ref:`datetime unit <arrays.dtypes.dateunits>`.
+ timezone : {'naive', 'UTC', 'local'} or tzinfo
+ Timezone information to use when displaying the datetime. If 'UTC', end
+ with a Z to indicate UTC time. If 'local', convert to the local timezone
+ first, and suffix with a +-#### timezone offset. If a tzinfo object,
+ then do as with 'local', but use the specified timezone.
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}
+ Casting to allow when changing between datetime units.
+
+ Returns
+ -------
+ str_arr : ndarray
+ An array of strings the same shape as `arr`.
+
+ Examples
+ --------
+ >>> d = np.arange('2002-10-27T04:30', 4*60, 60, dtype='M8[m]')
+ >>> d
+ array(['2002-10-27T04:30', '2002-10-27T05:30', '2002-10-27T06:30',
+ '2002-10-27T07:30'], dtype='datetime64[m]')
+
+ Setting the timezone to UTC shows the same information, but with a Z suffix
+
+ >>> np.datetime_as_string(d, timezone='UTC')
+    array(['2002-10-27T04:30Z', '2002-10-27T05:30Z', '2002-10-27T06:30Z',
+           '2002-10-27T07:30Z'], dtype='<U35')
+
+    Note that we picked datetimes that cross a DST boundary. Passing in a
+    ``pytz`` timezone object will print the appropriate offset
+
+    >>> np.datetime_as_string(d, timezone=pytz.timezone('US/Eastern'))
+    array(['2002-10-27T00:30-0400', '2002-10-27T01:30-0400',
+           '2002-10-27T01:30-0500', '2002-10-27T02:30-0500'], dtype='<U39')
+
+    Passing in a unit will change the precision
+
+    >>> np.datetime_as_string(d, unit='h')
+    array(['2002-10-27T04', '2002-10-27T05', '2002-10-27T06', '2002-10-27T07'],
+          dtype='<U32')
+    >>> np.datetime_as_string(d, unit='s')
+    array(['2002-10-27T04:30:00', '2002-10-27T05:30:00', '2002-10-27T06:30:00',
+           '2002-10-27T07:30:00'], dtype='<U38')
+
+    'casting' can be used to specify whether precision can be changed
+
+    >>> np.datetime_as_string(d, unit='h', casting='safe')
+ TypeError: Cannot create a datetime string as units 'h' from a NumPy
+ datetime with units 'm' according to the rule 'safe'
+ """
+ return (arr,)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/multiarray.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/multiarray.pyc
new file mode 100644
index 0000000..79a3d61
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/multiarray.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/numeric.py b/project/venv/lib/python2.7/site-packages/numpy/core/numeric.py
new file mode 100644
index 0000000..8768cbe
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/numeric.py
@@ -0,0 +1,3100 @@
+from __future__ import division, absolute_import, print_function
+
+try:
+ # Accessing collections abstract classes from collections
+ # has been deprecated since Python 3.3
+ import collections.abc as collections_abc
+except ImportError:
+ import collections as collections_abc
+import functools
+import itertools
+import operator
+import sys
+import warnings
+import numbers
+
+import numpy as np
+from . import multiarray
+from .multiarray import (
+ _fastCopyAndTranspose as fastCopyAndTranspose, ALLOW_THREADS,
+ BUFSIZE, CLIP, MAXDIMS, MAY_SHARE_BOUNDS, MAY_SHARE_EXACT, RAISE,
+ WRAP, arange, array, broadcast, can_cast, compare_chararrays,
+ concatenate, copyto, dot, dtype, empty,
+ empty_like, flatiter, frombuffer, fromfile, fromiter, fromstring,
+ inner, int_asbuffer, lexsort, matmul, may_share_memory,
+ min_scalar_type, ndarray, nditer, nested_iters, promote_types,
+ putmask, result_type, set_numeric_ops, shares_memory, vdot, where,
+ zeros, normalize_axis_index)
+if sys.version_info[0] < 3:
+ from .multiarray import newbuffer, getbuffer
+
+from . import overrides
+from . import umath
+from .overrides import set_module
+from .umath import (multiply, invert, sin, UFUNC_BUFSIZE_DEFAULT,
+ ERR_IGNORE, ERR_WARN, ERR_RAISE, ERR_CALL, ERR_PRINT,
+ ERR_LOG, ERR_DEFAULT, PINF, NAN)
+from . import numerictypes
+from .numerictypes import longlong, intc, int_, float_, complex_, bool_
+from ._internal import TooHardError, AxisError
+
+bitwise_not = invert
+ufunc = type(sin)
+newaxis = None
+
+if sys.version_info[0] >= 3:
+ if sys.version_info[1] in (6, 7):
+ try:
+ import pickle5 as pickle
+ except ImportError:
+ import pickle
+ else:
+ import pickle
+ basestring = str
+ import builtins
+else:
+ import cPickle as pickle
+ import __builtin__ as builtins
+
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
+def loads(*args, **kwargs):
+ # NumPy 1.15.0, 2017-12-10
+ warnings.warn(
+ "np.core.numeric.loads is deprecated, use pickle.loads instead",
+ DeprecationWarning, stacklevel=2)
+ return pickle.loads(*args, **kwargs)
+
+
+__all__ = [
+ 'newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc',
+ 'arange', 'array', 'zeros', 'count_nonzero', 'empty', 'broadcast', 'dtype',
+ 'fromstring', 'fromfile', 'frombuffer', 'int_asbuffer', 'where',
+ 'argwhere', 'copyto', 'concatenate', 'fastCopyAndTranspose', 'lexsort',
+ 'set_numeric_ops', 'can_cast', 'promote_types', 'min_scalar_type',
+ 'result_type', 'asarray', 'asanyarray', 'ascontiguousarray',
+ 'asfortranarray', 'isfortran', 'empty_like', 'zeros_like', 'ones_like',
+ 'correlate', 'convolve', 'inner', 'dot', 'outer', 'vdot', 'roll',
+ 'rollaxis', 'moveaxis', 'cross', 'tensordot', 'little_endian', 'require',
+ 'fromiter', 'array_equal', 'array_equiv', 'indices', 'fromfunction',
+ 'isclose', 'load', 'loads', 'isscalar', 'binary_repr', 'base_repr', 'ones',
+ 'identity', 'allclose', 'compare_chararrays', 'putmask', 'seterr',
+ 'geterr', 'setbufsize', 'getbufsize', 'seterrcall', 'geterrcall',
+ 'errstate', 'flatnonzero', 'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN',
+ 'False_', 'True_', 'bitwise_not', 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS',
+ 'BUFSIZE', 'ALLOW_THREADS', 'ComplexWarning', 'full', 'full_like',
+ 'matmul', 'shares_memory', 'may_share_memory', 'MAY_SHARE_BOUNDS',
+ 'MAY_SHARE_EXACT', 'TooHardError', 'AxisError']
+
+if sys.version_info[0] < 3:
+ __all__.extend(['getbuffer', 'newbuffer'])
+
+
+@set_module('numpy')
+class ComplexWarning(RuntimeWarning):
+ """
+ The warning raised when casting a complex dtype to a real dtype.
+
+ As implemented, casting a complex number to a real discards its imaginary
+ part, but this behavior may not be what the user actually wants.
+
+ """
+ pass
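+
+# For example (a sketch), this warning is emitted by casts such as
+# ``np.array([1 + 2j]).astype(float)``, which discards the imaginary part.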
+
+
+def _zeros_like_dispatcher(a, dtype=None, order=None, subok=None):
+ return (a,)
+
+
+@array_function_dispatch(_zeros_like_dispatcher)
+def zeros_like(a, dtype=None, order='K', subok=True):
+ """
+ Return an array of zeros with the same shape and type as a given array.
+
+ Parameters
+ ----------
+ a : array_like
+ The shape and data-type of `a` define these same attributes of
+ the returned array.
+ dtype : data-type, optional
+ Overrides the data type of the result.
+
+ .. versionadded:: 1.6.0
+ order : {'C', 'F', 'A', or 'K'}, optional
+ Overrides the memory layout of the result. 'C' means C-order,
+ 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
+ 'C' otherwise. 'K' means match the layout of `a` as closely
+ as possible.
+
+ .. versionadded:: 1.6.0
+ subok : bool, optional.
+ If True, then the newly created array will use the sub-class
+ type of 'a', otherwise it will be a base-class array. Defaults
+ to True.
+
+ Returns
+ -------
+ out : ndarray
+ Array of zeros with the same shape and type as `a`.
+
+ See Also
+ --------
+ empty_like : Return an empty array with shape and type of input.
+ ones_like : Return an array of ones with shape and type of input.
+ full_like : Return a new array with shape of input filled with value.
+ zeros : Return a new array setting values to zero.
+
+ Examples
+ --------
+ >>> x = np.arange(6)
+ >>> x = x.reshape((2, 3))
+ >>> x
+ array([[0, 1, 2],
+ [3, 4, 5]])
+ >>> np.zeros_like(x)
+ array([[0, 0, 0],
+ [0, 0, 0]])
+
+ >>> y = np.arange(3, dtype=float)
+ >>> y
+ array([ 0., 1., 2.])
+ >>> np.zeros_like(y)
+ array([ 0., 0., 0.])
+
+ """
+ res = empty_like(a, dtype=dtype, order=order, subok=subok)
+    # needed instead of a 0 to get the same result as zeros for string dtypes
+ z = zeros(1, dtype=res.dtype)
+ multiarray.copyto(res, z, casting='unsafe')
+ return res
+
+
+@set_module('numpy')
+def ones(shape, dtype=None, order='C'):
+ """
+ Return a new array of given shape and type, filled with ones.
+
+ Parameters
+ ----------
+ shape : int or sequence of ints
+ Shape of the new array, e.g., ``(2, 3)`` or ``2``.
+ dtype : data-type, optional
+ The desired data-type for the array, e.g., `numpy.int8`. Default is
+ `numpy.float64`.
+ order : {'C', 'F'}, optional, default: C
+ Whether to store multi-dimensional data in row-major
+ (C-style) or column-major (Fortran-style) order in
+ memory.
+
+ Returns
+ -------
+ out : ndarray
+ Array of ones with the given shape, dtype, and order.
+
+ See Also
+ --------
+ ones_like : Return an array of ones with shape and type of input.
+ empty : Return a new uninitialized array.
+ zeros : Return a new array setting values to zero.
+ full : Return a new array of given shape filled with value.
+
+
+ Examples
+ --------
+ >>> np.ones(5)
+ array([ 1., 1., 1., 1., 1.])
+
+ >>> np.ones((5,), dtype=int)
+ array([1, 1, 1, 1, 1])
+
+ >>> np.ones((2, 1))
+ array([[ 1.],
+ [ 1.]])
+
+ >>> s = (2,2)
+ >>> np.ones(s)
+ array([[ 1., 1.],
+ [ 1., 1.]])
+
+ """
+ a = empty(shape, dtype, order)
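+    # filling through copyto with casting='unsafe' lets the fill value 1 be
+    # converted to any dtype, matching the behavior of `zeros`/`full`.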
+ multiarray.copyto(a, 1, casting='unsafe')
+ return a
+
+
+def _ones_like_dispatcher(a, dtype=None, order=None, subok=None):
+ return (a,)
+
+
+@array_function_dispatch(_ones_like_dispatcher)
+def ones_like(a, dtype=None, order='K', subok=True):
+ """
+ Return an array of ones with the same shape and type as a given array.
+
+ Parameters
+ ----------
+ a : array_like
+ The shape and data-type of `a` define these same attributes of
+ the returned array.
+ dtype : data-type, optional
+ Overrides the data type of the result.
+
+ .. versionadded:: 1.6.0
+ order : {'C', 'F', 'A', or 'K'}, optional
+ Overrides the memory layout of the result. 'C' means C-order,
+ 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
+ 'C' otherwise. 'K' means match the layout of `a` as closely
+ as possible.
+
+ .. versionadded:: 1.6.0
+ subok : bool, optional.
+ If True, then the newly created array will use the sub-class
+ type of 'a', otherwise it will be a base-class array. Defaults
+ to True.
+
+ Returns
+ -------
+ out : ndarray
+ Array of ones with the same shape and type as `a`.
+
+ See Also
+ --------
+ empty_like : Return an empty array with shape and type of input.
+ zeros_like : Return an array of zeros with shape and type of input.
+ full_like : Return a new array with shape of input filled with value.
+ ones : Return a new array setting values to one.
+
+ Examples
+ --------
+ >>> x = np.arange(6)
+ >>> x = x.reshape((2, 3))
+ >>> x
+ array([[0, 1, 2],
+ [3, 4, 5]])
+ >>> np.ones_like(x)
+ array([[1, 1, 1],
+ [1, 1, 1]])
+
+ >>> y = np.arange(3, dtype=float)
+ >>> y
+ array([ 0., 1., 2.])
+ >>> np.ones_like(y)
+ array([ 1., 1., 1.])
+
+ """
+ res = empty_like(a, dtype=dtype, order=order, subok=subok)
+ multiarray.copyto(res, 1, casting='unsafe')
+ return res
+
+
+@set_module('numpy')
+def full(shape, fill_value, dtype=None, order='C'):
+ """
+ Return a new array of given shape and type, filled with `fill_value`.
+
+ Parameters
+ ----------
+ shape : int or sequence of ints
+ Shape of the new array, e.g., ``(2, 3)`` or ``2``.
+ fill_value : scalar
+ Fill value.
+ dtype : data-type, optional
+ The desired data-type for the array The default, `None`, means
+ `np.array(fill_value).dtype`.
+ order : {'C', 'F'}, optional
+ Whether to store multidimensional data in C- or Fortran-contiguous
+ (row- or column-wise) order in memory.
+
+ Returns
+ -------
+ out : ndarray
+ Array of `fill_value` with the given shape, dtype, and order.
+
+ See Also
+ --------
+ full_like : Return a new array with shape of input filled with value.
+ empty : Return a new uninitialized array.
+ ones : Return a new array setting values to one.
+ zeros : Return a new array setting values to zero.
+
+ Examples
+ --------
+ >>> np.full((2, 2), np.inf)
+ array([[ inf, inf],
+ [ inf, inf]])
+ >>> np.full((2, 2), 10)
+ array([[10, 10],
+ [10, 10]])
+
+ """
+ if dtype is None:
+ dtype = array(fill_value).dtype
+ a = empty(shape, dtype, order)
+ multiarray.copyto(a, fill_value, casting='unsafe')
+ return a
+
+
+def _full_like_dispatcher(a, fill_value, dtype=None, order=None, subok=None):
+ return (a,)
+
+
+@array_function_dispatch(_full_like_dispatcher)
+def full_like(a, fill_value, dtype=None, order='K', subok=True):
+ """
+ Return a full array with the same shape and type as a given array.
+
+ Parameters
+ ----------
+ a : array_like
+ The shape and data-type of `a` define these same attributes of
+ the returned array.
+ fill_value : scalar
+ Fill value.
+ dtype : data-type, optional
+ Overrides the data type of the result.
+ order : {'C', 'F', 'A', or 'K'}, optional
+ Overrides the memory layout of the result. 'C' means C-order,
+ 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
+ 'C' otherwise. 'K' means match the layout of `a` as closely
+ as possible.
+ subok : bool, optional
+ If True, then the newly created array will use the sub-class
+ type of 'a', otherwise it will be a base-class array. Defaults
+ to True.
+
+ Returns
+ -------
+ out : ndarray
+ Array of `fill_value` with the same shape and type as `a`.
+
+ See Also
+ --------
+ empty_like : Return an empty array with shape and type of input.
+ ones_like : Return an array of ones with shape and type of input.
+ zeros_like : Return an array of zeros with shape and type of input.
+ full : Return a new array of given shape filled with value.
+
+ Examples
+ --------
+ >>> x = np.arange(6, dtype=int)
+ >>> np.full_like(x, 1)
+ array([1, 1, 1, 1, 1, 1])
+ >>> np.full_like(x, 0.1)
+ array([0, 0, 0, 0, 0, 0])
+ >>> np.full_like(x, 0.1, dtype=np.double)
+ array([ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
+ >>> np.full_like(x, np.nan, dtype=np.double)
+ array([ nan, nan, nan, nan, nan, nan])
+
+ >>> y = np.arange(6, dtype=np.double)
+ >>> np.full_like(y, 0.1)
+ array([ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
+
+ """
+ res = empty_like(a, dtype=dtype, order=order, subok=subok)
+ multiarray.copyto(res, fill_value, casting='unsafe')
+ return res
+
+
+def _count_nonzero_dispatcher(a, axis=None):
+ return (a,)
+
+
+@array_function_dispatch(_count_nonzero_dispatcher)
+def count_nonzero(a, axis=None):
+ """
+ Counts the number of non-zero values in the array ``a``.
+
+ The word "non-zero" is in reference to the Python 2.x
+ built-in method ``__nonzero__()`` (renamed ``__bool__()``
+ in Python 3.x) of Python objects that tests an object's
+ "truthfulness". For example, any number is considered
+ truthful if it is nonzero, whereas any string is considered
+ truthful if it is not the empty string. Thus, this function
+ (recursively) counts how many elements in ``a`` (and in
+ sub-arrays thereof) have their ``__nonzero__()`` or ``__bool__()``
+ method evaluated to ``True``.
+
+ Parameters
+ ----------
+ a : array_like
+ The array for which to count non-zeros.
+ axis : int or tuple, optional
+ Axis or tuple of axes along which to count non-zeros.
+ Default is None, meaning that non-zeros will be counted
+ along a flattened version of ``a``.
+
+ .. versionadded:: 1.12.0
+
+ Returns
+ -------
+ count : int or array of int
+ Number of non-zero values in the array along a given axis.
+ Otherwise, the total number of non-zero values in the array
+ is returned.
+
+ See Also
+ --------
+ nonzero : Return the coordinates of all the non-zero values.
+
+ Examples
+ --------
+ >>> np.count_nonzero(np.eye(4))
+ 4
+ >>> np.count_nonzero([[0,1,7,0,0],[3,0,0,2,19]])
+ 5
+ >>> np.count_nonzero([[0,1,7,0,0],[3,0,0,2,19]], axis=0)
+ array([1, 1, 1, 1, 1])
+ >>> np.count_nonzero([[0,1,7,0,0],[3,0,0,2,19]], axis=1)
+ array([2, 3])
+
+ """
+ if axis is None:
+ return multiarray.count_nonzero(a)
+
+ a = asanyarray(a)
+
+ # TODO: this works around .astype(bool) not working properly (gh-9847)
+ if np.issubdtype(a.dtype, np.character):
+ a_bool = a != a.dtype.type()
+ else:
+ a_bool = a.astype(np.bool_, copy=False)
+
+ return a_bool.sum(axis=axis, dtype=np.intp)
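+
+# REPL sketch (illustrative): the axis reduction above is just a boolean
+# cast followed by a sum along that axis.
+#
+#     >>> a = np.array([[0, 1, 7, 0, 0], [3, 0, 0, 2, 19]])
+#     >>> (a != 0).sum(axis=1)
+#     array([2, 3])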
+
+
+@set_module('numpy')
+def asarray(a, dtype=None, order=None):
+ """Convert the input to an array.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data, in any form that can be converted to an array. This
+ includes lists, lists of tuples, tuples, tuples of tuples, tuples
+ of lists and ndarrays.
+ dtype : data-type, optional
+ By default, the data-type is inferred from the input data.
+ order : {'C', 'F'}, optional
+ Whether to use row-major (C-style) or
+ column-major (Fortran-style) memory representation.
+ Defaults to 'C'.
+
+ Returns
+ -------
+ out : ndarray
+ Array interpretation of `a`. No copy is performed if the input
+ is already an ndarray with matching dtype and order. If `a` is a
+ subclass of ndarray, a base class ndarray is returned.
+
+ See Also
+ --------
+ asanyarray : Similar function which passes through subclasses.
+ ascontiguousarray : Convert input to a contiguous array.
+ asfarray : Convert input to a floating point ndarray.
+ asfortranarray : Convert input to an ndarray with column-major
+ memory order.
+ asarray_chkfinite : Similar function which checks input for NaNs and Infs.
+ fromiter : Create an array from an iterator.
+ fromfunction : Construct an array by executing a function on grid
+ positions.
+
+ Examples
+ --------
+ Convert a list into an array:
+
+ >>> a = [1, 2]
+ >>> np.asarray(a)
+ array([1, 2])
+
+ Existing arrays are not copied:
+
+ >>> a = np.array([1, 2])
+ >>> np.asarray(a) is a
+ True
+
+ If `dtype` is set, array is copied only if dtype does not match:
+
+ >>> a = np.array([1, 2], dtype=np.float32)
+ >>> np.asarray(a, dtype=np.float32) is a
+ True
+ >>> np.asarray(a, dtype=np.float64) is a
+ False
+
+ Contrary to `asanyarray`, ndarray subclasses are not passed through:
+
+ >>> issubclass(np.recarray, np.ndarray)
+ True
+ >>> a = np.array([(1.0, 2), (3.0, 4)], dtype='f4,i4').view(np.recarray)
+ >>> np.asarray(a) is a
+ False
+ >>> np.asanyarray(a) is a
+ True
+
+ """
+ return array(a, dtype, copy=False, order=order)
+
+
+@set_module('numpy')
+def asanyarray(a, dtype=None, order=None):
+ """Convert the input to an ndarray, but pass ndarray subclasses through.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data, in any form that can be converted to an array. This
+ includes scalars, lists, lists of tuples, tuples, tuples of tuples,
+ tuples of lists, and ndarrays.
+ dtype : data-type, optional
+ By default, the data-type is inferred from the input data.
+ order : {'C', 'F'}, optional
+ Whether to use row-major (C-style) or column-major
+ (Fortran-style) memory representation. Defaults to 'C'.
+
+ Returns
+ -------
+ out : ndarray or an ndarray subclass
+ Array interpretation of `a`. If `a` is an ndarray or a subclass
+ of ndarray, it is returned as-is and no copy is performed.
+
+ See Also
+ --------
+ asarray : Similar function which always returns ndarrays.
+ ascontiguousarray : Convert input to a contiguous array.
+ asfarray : Convert input to a floating point ndarray.
+ asfortranarray : Convert input to an ndarray with column-major
+ memory order.
+ asarray_chkfinite : Similar function which checks input for NaNs and
+ Infs.
+ fromiter : Create an array from an iterator.
+ fromfunction : Construct an array by executing a function on grid
+ positions.
+
+ Examples
+ --------
+ Convert a list into an array:
+
+ >>> a = [1, 2]
+ >>> np.asanyarray(a)
+ array([1, 2])
+
+ Instances of `ndarray` subclasses are passed through as-is:
+
+ >>> a = np.array([(1.0, 2), (3.0, 4)], dtype='f4,i4').view(np.recarray)
+ >>> np.asanyarray(a) is a
+ True
+
+ """
+ return array(a, dtype, copy=False, order=order, subok=True)
+
+
+@set_module('numpy')
+def ascontiguousarray(a, dtype=None):
+ """
+ Return a contiguous array (ndim >= 1) in memory (C order).
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ dtype : str or dtype object, optional
+ Data-type of returned array.
+
+ Returns
+ -------
+ out : ndarray
+ Contiguous array of same shape and content as `a`, with type `dtype`
+ if specified.
+
+ See Also
+ --------
+ asfortranarray : Convert input to an ndarray with column-major
+ memory order.
+ require : Return an ndarray that satisfies requirements.
+ ndarray.flags : Information about the memory layout of the array.
+
+ Examples
+ --------
+ >>> x = np.arange(6).reshape(2,3)
+ >>> np.ascontiguousarray(x, dtype=np.float32)
+ array([[ 0., 1., 2.],
+ [ 3., 4., 5.]], dtype=float32)
+ >>> x.flags['C_CONTIGUOUS']
+ True
+
+ Note: This function returns an array with at least one dimension (1-d),
+ so it does not preserve 0-d arrays.
+
+ """
+ return array(a, dtype, copy=False, order='C', ndmin=1)
+
+
+@set_module('numpy')
+def asfortranarray(a, dtype=None):
+ """
+ Return an array (ndim >= 1) laid out in Fortran order in memory.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ dtype : str or dtype object, optional
+ By default, the data-type is inferred from the input data.
+
+ Returns
+ -------
+ out : ndarray
+ The input `a` in Fortran, or column-major, order.
+
+ See Also
+ --------
+ ascontiguousarray : Convert input to a contiguous (C order) array.
+ asanyarray : Convert input to an ndarray with either row or
+ column-major memory order.
+ require : Return an ndarray that satisfies requirements.
+ ndarray.flags : Information about the memory layout of the array.
+
+ Examples
+ --------
+ >>> x = np.arange(6).reshape(2,3)
+ >>> y = np.asfortranarray(x)
+ >>> x.flags['F_CONTIGUOUS']
+ False
+ >>> y.flags['F_CONTIGUOUS']
+ True
+
+ Note: This function returns an array with at least one dimension (1-d),
+ so it does not preserve 0-d arrays.
+
+ """
+ return array(a, dtype, copy=False, order='F', ndmin=1)
+
+
+@set_module('numpy')
+def require(a, dtype=None, requirements=None):
+ """
+ Return an ndarray of the provided type that satisfies requirements.
+
+ This function is useful to be sure that an array with the correct flags
+ is returned for passing to compiled code (perhaps through ctypes).
+
+ Parameters
+ ----------
+ a : array_like
+ The object to be converted to a type-and-requirement-satisfying array.
+ dtype : data-type
+ The required data-type. If None, preserve the current dtype. If your
+ application requires the data to be in native byteorder, include
+ a byteorder specification as a part of the dtype specification.
+ requirements : str or list of str
+ The requirements list can be any of the following
+
+ * 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array
+ * 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array
+ * 'ALIGNED' ('A') - ensure a data-type aligned array
+ * 'WRITEABLE' ('W') - ensure a writable array
+ * 'OWNDATA' ('O') - ensure an array that owns its own data
+ * 'ENSUREARRAY' ('E') - ensure a base array, instead of a subclass
+
+ See Also
+ --------
+ asarray : Convert input to an ndarray.
+ asanyarray : Convert to an ndarray, but pass through ndarray subclasses.
+ ascontiguousarray : Convert input to a contiguous array.
+ asfortranarray : Convert input to an ndarray with column-major
+ memory order.
+ ndarray.flags : Information about the memory layout of the array.
+
+ Notes
+ -----
+ The returned array will be guaranteed to have the listed requirements
+ by making a copy if needed.
+
+ Examples
+ --------
+ >>> x = np.arange(6).reshape(2,3)
+ >>> x.flags
+ C_CONTIGUOUS : True
+ F_CONTIGUOUS : False
+ OWNDATA : False
+ WRITEABLE : True
+ ALIGNED : True
+ WRITEBACKIFCOPY : False
+ UPDATEIFCOPY : False
+
+ >>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F'])
+ >>> y.flags
+ C_CONTIGUOUS : False
+ F_CONTIGUOUS : True
+ OWNDATA : True
+ WRITEABLE : True
+ ALIGNED : True
+ WRITEBACKIFCOPY : False
+ UPDATEIFCOPY : False
+
+ """
+ possible_flags = {'C': 'C', 'C_CONTIGUOUS': 'C', 'CONTIGUOUS': 'C',
+ 'F': 'F', 'F_CONTIGUOUS': 'F', 'FORTRAN': 'F',
+ 'A': 'A', 'ALIGNED': 'A',
+ 'W': 'W', 'WRITEABLE': 'W',
+ 'O': 'O', 'OWNDATA': 'O',
+ 'E': 'E', 'ENSUREARRAY': 'E'}
+ if not requirements:
+ return asanyarray(a, dtype=dtype)
+ else:
+ requirements = {possible_flags[x.upper()] for x in requirements}
+
+ if 'E' in requirements:
+ requirements.remove('E')
+ subok = False
+ else:
+ subok = True
+
+ order = 'A'
+ if requirements >= {'C', 'F'}:
+ raise ValueError('Cannot specify both "C" and "F" order')
+ elif 'F' in requirements:
+ order = 'F'
+ requirements.remove('F')
+ elif 'C' in requirements:
+ order = 'C'
+ requirements.remove('C')
+
+ arr = array(a, dtype=dtype, order=order, copy=False, subok=subok)
+
+ for prop in requirements:
+ if not arr.flags[prop]:
+ arr = arr.copy(order)
+ break
+ return arr
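+
+# REPL sketch (illustrative): 'E' is consumed before the conversion and
+# forces subok=False, so subclasses such as recarray come back as plain
+# ndarrays.
+#
+#     >>> r = np.zeros(3, dtype='f4,i4').view(np.recarray)
+#     >>> type(np.require(r, requirements=['E'])) is np.ndarray
+#     True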
+
+
+@set_module('numpy')
+def isfortran(a):
+ """
+ Returns True if the array is Fortran contiguous but *not* C contiguous.
+
+ This function is obsolete and, because of changes due to relaxed stride
+ checking, its return value for the same array may differ for versions
+ of NumPy >= 1.10.0 and previous versions. If you only want to check if an
+ array is Fortran contiguous use ``a.flags.f_contiguous`` instead.
+
+ Parameters
+ ----------
+ a : ndarray
+ Input array.
+
+ Returns
+ -------
+ isfortran : bool
+ Returns True if the array is Fortran contiguous but *not* C contiguous.
+
+ Examples
+ --------
+
+ np.array allows you to specify whether the array is written in C-contiguous
+ order (last index varies the fastest), or FORTRAN-contiguous order in
+ memory (first index varies the fastest).
+
+ >>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C')
+ >>> a
+ array([[1, 2, 3],
+ [4, 5, 6]])
+ >>> np.isfortran(a)
+ False
+
+ >>> b = np.array([[1, 2, 3], [4, 5, 6]], order='F')
+ >>> b
+ array([[1, 2, 3],
+ [4, 5, 6]])
+ >>> np.isfortran(b)
+ True
+
+
+ The transpose of a C-ordered array is a FORTRAN-ordered array.
+
+ >>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C')
+ >>> a
+ array([[1, 2, 3],
+ [4, 5, 6]])
+ >>> np.isfortran(a)
+ False
+ >>> b = a.T
+ >>> b
+ array([[1, 4],
+ [2, 5],
+ [3, 6]])
+ >>> np.isfortran(b)
+ True
+
+ C-ordered arrays evaluate as False even if they are also FORTRAN-ordered.
+
+ >>> np.isfortran(np.array([1, 2], order='F'))
+ False
+
+ """
+ return a.flags.fnc
+
+
+def _argwhere_dispatcher(a):
+ return (a,)
+
+
+@array_function_dispatch(_argwhere_dispatcher)
+def argwhere(a):
+ """
+ Find the indices of array elements that are non-zero, grouped by element.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data.
+
+ Returns
+ -------
+ index_array : ndarray
+ Indices of elements that are non-zero. Indices are grouped by element.
+
+ See Also
+ --------
+ where, nonzero
+
+ Notes
+ -----
+ ``np.argwhere(a)`` is the same as ``np.transpose(np.nonzero(a))``.
+
+ The output of ``argwhere`` is not suitable for indexing arrays.
+ For this purpose use ``nonzero(a)`` instead.
+
+ Examples
+ --------
+ >>> x = np.arange(6).reshape(2,3)
+ >>> x
+ array([[0, 1, 2],
+ [3, 4, 5]])
+ >>> np.argwhere(x>1)
+ array([[0, 2],
+ [1, 0],
+ [1, 1],
+ [1, 2]])
+
+ """
+ return transpose(nonzero(a))
+
+
+def _flatnonzero_dispatcher(a):
+ return (a,)
+
+
+@array_function_dispatch(_flatnonzero_dispatcher)
+def flatnonzero(a):
+ """
+ Return the indices of the non-zero elements in the flattened version of `a`.
+
+ This is equivalent to ``np.nonzero(np.ravel(a))[0]``.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data.
+
+ Returns
+ -------
+ res : ndarray
+ Output array, containing the indices of the elements of `a.ravel()`
+ that are non-zero.
+
+ See Also
+ --------
+ nonzero : Return the indices of the non-zero elements of the input array.
+ ravel : Return a 1-D array containing the elements of the input array.
+
+ Examples
+ --------
+ >>> x = np.arange(-2, 3)
+ >>> x
+ array([-2, -1, 0, 1, 2])
+ >>> np.flatnonzero(x)
+ array([0, 1, 3, 4])
+
+ Use the indices of the non-zero elements as an index array to extract
+ these elements:
+
+ >>> x.ravel()[np.flatnonzero(x)]
+ array([-2, -1, 1, 2])
+
+ """
+ return np.nonzero(np.ravel(a))[0]
+
+
+_mode_from_name_dict = {'v': 0,
+ 's': 1,
+ 'f': 2}
+
+
+def _mode_from_name(mode):
+ if isinstance(mode, basestring):
+ return _mode_from_name_dict[mode.lower()[0]]
+ return mode
+
+
+def _correlate_dispatcher(a, v, mode=None):
+ return (a, v)
+
+
+@array_function_dispatch(_correlate_dispatcher)
+def correlate(a, v, mode='valid'):
+ """
+ Cross-correlation of two 1-dimensional sequences.
+
+ This function computes the correlation as generally defined in signal
+ processing texts::
+
+ c_{av}[k] = sum_n a[n+k] * conj(v[n])
+
+ with a and v sequences being zero-padded where necessary and conj being
+ the conjugate.
+
+ Parameters
+ ----------
+ a, v : array_like
+ Input sequences.
+ mode : {'valid', 'same', 'full'}, optional
+ Refer to the `convolve` docstring. Note that the default
+ is 'valid', unlike `convolve`, which uses 'full'.
+ old_behavior : bool
+ `old_behavior` was removed in NumPy 1.10. If you need the old
+ behavior, use `multiarray.correlate`.
+
+ Returns
+ -------
+ out : ndarray
+ Discrete cross-correlation of `a` and `v`.
+
+ See Also
+ --------
+ convolve : Discrete, linear convolution of two one-dimensional sequences.
+ multiarray.correlate : Old, no conjugate, version of correlate.
+
+ Notes
+ -----
+ The definition of correlation above is not unique and sometimes correlation
+ may be defined differently. Another common definition is::
+
+ c'_{av}[k] = sum_n a[n] conj(v[n+k])
+
+ which is related to ``c_{av}[k]`` by ``c'_{av}[k] = c_{av}[-k]``.
+
+ Examples
+ --------
+ >>> np.correlate([1, 2, 3], [0, 1, 0.5])
+ array([ 3.5])
+ >>> np.correlate([1, 2, 3], [0, 1, 0.5], "same")
+ array([ 2. , 3.5, 3. ])
+ >>> np.correlate([1, 2, 3], [0, 1, 0.5], "full")
+ array([ 0.5, 2. , 3.5, 3. , 0. ])
+
+ Using complex sequences:
+
+ >>> np.correlate([1+1j, 2, 3-1j], [0, 1, 0.5j], 'full')
+ array([ 0.5-0.5j, 1.0+0.j , 1.5-1.5j, 3.0-1.j , 0.0+0.j ])
+
+ Note that you get the time reversed, complex conjugated result
+ when the two input sequences change places, i.e.,
+ ``c_{va}[k] = c^{*}_{av}[-k]``:
+
+ >>> np.correlate([0, 1, 0.5j], [1+1j, 2, 3-1j], 'full')
+ array([ 0.0+0.j , 3.0+1.j , 1.5+1.5j, 1.0+0.j , 0.5+0.5j])
+
+ """
+ mode = _mode_from_name(mode)
+ return multiarray.correlate2(a, v, mode)
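+
+# REPL sketch (illustrative): the 'valid' output can be reproduced directly
+# from the definition c_{av}[k] = sum_n a[n+k] * conj(v[n]) for real input.
+#
+#     >>> a, v = [1, 2, 3], [0, 1, 0.5]
+#     >>> [sum(a[n + k] * v[n] for n in range(len(v)))
+#     ...  for k in range(len(a) - len(v) + 1)]
+#     [3.5]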
+
+
+def _convolve_dispatcher(a, v, mode=None):
+ return (a, v)
+
+
+@array_function_dispatch(_convolve_dispatcher)
+def convolve(a, v, mode='full'):
+ """
+ Returns the discrete, linear convolution of two one-dimensional sequences.
+
+ The convolution operator is often seen in signal processing, where it
+ models the effect of a linear time-invariant system on a signal [1]_. In
+ probability theory, the sum of two independent random variables is
+ distributed according to the convolution of their individual
+ distributions.
+
+ If `v` is longer than `a`, the arrays are swapped before computation.
+
+ Parameters
+ ----------
+ a : (N,) array_like
+ First one-dimensional input array.
+ v : (M,) array_like
+ Second one-dimensional input array.
+ mode : {'full', 'valid', 'same'}, optional
+ 'full':
+ By default, mode is 'full'. This returns the convolution
+ at each point of overlap, with an output shape of (N+M-1,). At
+ the end-points of the convolution, the signals do not overlap
+ completely, and boundary effects may be seen.
+
+ 'same':
+ Mode 'same' returns output of length ``max(M, N)``. Boundary
+ effects are still visible.
+
+ 'valid':
+ Mode 'valid' returns output of length
+ ``max(M, N) - min(M, N) + 1``. The convolution product is only given
+ for points where the signals overlap completely. Values outside
+ the signal boundary have no effect.
+
+ Returns
+ -------
+ out : ndarray
+ Discrete, linear convolution of `a` and `v`.
+
+ See Also
+ --------
+ scipy.signal.fftconvolve : Convolve two arrays using the Fast Fourier
+ Transform.
+ scipy.linalg.toeplitz : Used to construct the convolution operator.
+ polymul : Polynomial multiplication. Same output as convolve, but also
+ accepts poly1d objects as input.
+
+ Notes
+ -----
+ The discrete convolution operation is defined as
+
+ .. math:: (a * v)[n] = \\sum_{m = -\\infty}^{\\infty} a[m] v[n - m]
+
+ It can be shown that a convolution :math:`x(t) * y(t)` in time/space
+ is equivalent to the multiplication :math:`X(f) Y(f)` in the Fourier
+ domain, after appropriate padding (padding is necessary to prevent
+ circular convolution). Since multiplication is more efficient (faster)
+ than convolution, the function `scipy.signal.fftconvolve` exploits the
+ FFT to calculate the convolution of large data-sets.
+
+ References
+ ----------
+ .. [1] Wikipedia, "Convolution",
+ https://en.wikipedia.org/wiki/Convolution
+
+ Examples
+ --------
+ Note how the convolution operator flips the second array
+ before "sliding" the two across one another:
+
+ >>> np.convolve([1, 2, 3], [0, 1, 0.5])
+ array([ 0. , 1. , 2.5, 4. , 1.5])
+
+ Only return the middle values of the convolution.
+ Contains boundary effects, where zeros are taken
+ into account:
+
+ >>> np.convolve([1,2,3],[0,1,0.5], 'same')
+ array([ 1. , 2.5, 4. ])
+
+ The two arrays are of the same length, so there
+ is only one position where they completely overlap:
+
+ >>> np.convolve([1,2,3],[0,1,0.5], 'valid')
+ array([ 2.5])
+
+ """
+ a, v = array(a, copy=False, ndmin=1), array(v, copy=False, ndmin=1)
+ if (len(v) > len(a)):
+ a, v = v, a
+ if len(a) == 0:
+ raise ValueError('a cannot be empty')
+ if len(v) == 0:
+ raise ValueError('v cannot be empty')
+ mode = _mode_from_name(mode)
+ return multiarray.correlate(a, v[::-1], mode)
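+
+# REPL sketch (illustrative): 'full' mode matches the naive sum
+# (a * v)[n] = sum_m a[m] * v[n - m] with out-of-range terms dropped.
+#
+#     >>> a, v = [1, 2, 3], [0, 1, 0.5]
+#     >>> [sum(a[m] * v[n - m] for m in range(len(a)) if 0 <= n - m < len(v))
+#     ...  for n in range(len(a) + len(v) - 1)]
+#     [0, 1, 2.5, 4.0, 1.5]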
+
+
+def _outer_dispatcher(a, b, out=None):
+ return (a, b, out)
+
+
+@array_function_dispatch(_outer_dispatcher)
+def outer(a, b, out=None):
+ """
+ Compute the outer product of two vectors.
+
+ Given two vectors, ``a = [a0, a1, ..., aM]`` and
+ ``b = [b0, b1, ..., bN]``,
+ the outer product [1]_ is::
+
+ [[a0*b0 a0*b1 ... a0*bN ]
+ [a1*b0 .
+ [ ... .
+ [aM*b0 aM*bN ]]
+
+ Parameters
+ ----------
+ a : (M,) array_like
+ First input vector. Input is flattened if
+ not already 1-dimensional.
+ b : (N,) array_like
+ Second input vector. Input is flattened if
+ not already 1-dimensional.
+ out : (M, N) ndarray, optional
+ A location where the result is stored.
+
+ .. versionadded:: 1.9.0
+
+ Returns
+ -------
+ out : (M, N) ndarray
+ ``out[i, j] = a[i] * b[j]``
+
+ See also
+ --------
+ inner
+ einsum : ``einsum('i,j->ij', a.ravel(), b.ravel())`` is the equivalent.
+ ufunc.outer : A generalization to N dimensions and other operations.
+ ``np.multiply.outer(a.ravel(), b.ravel())`` is the equivalent.
+
+ References
+ ----------
+ .. [1] : G. H. Golub and C. F. Van Loan, *Matrix Computations*, 3rd
+ ed., Baltimore, MD, Johns Hopkins University Press, 1996,
+ pg. 8.
+
+ Examples
+ --------
+ Make a (*very* coarse) grid for computing a Mandelbrot set:
+
+ >>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5))
+ >>> rl
+ array([[-2., -1., 0., 1., 2.],
+ [-2., -1., 0., 1., 2.],
+ [-2., -1., 0., 1., 2.],
+ [-2., -1., 0., 1., 2.],
+ [-2., -1., 0., 1., 2.]])
+ >>> im = np.outer(1j*np.linspace(2, -2, 5), np.ones((5,)))
+ >>> im
+ array([[ 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j],
+ [ 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j],
+ [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
+ [ 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j],
+ [ 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j]])
+ >>> grid = rl + im
+ >>> grid
+ array([[-2.+2.j, -1.+2.j, 0.+2.j, 1.+2.j, 2.+2.j],
+ [-2.+1.j, -1.+1.j, 0.+1.j, 1.+1.j, 2.+1.j],
+ [-2.+0.j, -1.+0.j, 0.+0.j, 1.+0.j, 2.+0.j],
+ [-2.-1.j, -1.-1.j, 0.-1.j, 1.-1.j, 2.-1.j],
+ [-2.-2.j, -1.-2.j, 0.-2.j, 1.-2.j, 2.-2.j]])
+
+ An example using a "vector" of letters:
+
+ >>> x = np.array(['a', 'b', 'c'], dtype=object)
+ >>> np.outer(x, [1, 2, 3])
+ array([[a, aa, aaa],
+ [b, bb, bbb],
+ [c, cc, ccc]], dtype=object)
+
+ """
+ a = asarray(a)
+ b = asarray(b)
+ return multiply(a.ravel()[:, newaxis], b.ravel()[newaxis, :], out)
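+
+# REPL sketch (illustrative): the newaxis trick above is plain broadcasting,
+# a column of shape (M, 1) times a row of shape (1, N).
+#
+#     >>> a, b = np.array([1, 2, 3]), np.array([10, 20])
+#     >>> np.array_equal(np.outer(a, b), a[:, None] * b[None, :])
+#     True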
+
+
+def _tensordot_dispatcher(a, b, axes=None):
+ return (a, b)
+
+
+@array_function_dispatch(_tensordot_dispatcher)
+def tensordot(a, b, axes=2):
+ """
+ Compute tensor dot product along specified axes for arrays >= 1-D.
+
+ Given two tensors (arrays of dimension greater than or equal to one),
+ `a` and `b`, and an array_like object containing two array_like
+ objects, ``(a_axes, b_axes)``, sum the products of `a`'s and `b`'s
+ elements (components) over the axes specified by ``a_axes`` and
+ ``b_axes``. The third argument can be a single non-negative
+ integer_like scalar, ``N``; if it is such, then the last ``N``
+ dimensions of `a` and the first ``N`` dimensions of `b` are summed
+ over.
+
+ Parameters
+ ----------
+ a, b : array_like, len(shape) >= 1
+ Tensors to "dot".
+
+ axes : int or (2,) array_like
+ * integer_like
+ If an int N, sum over the last N axes of `a` and the first N axes
+ of `b` in order. The sizes of the corresponding axes must match.
+ * (2,) array_like
+ Or, a list of axes to be summed over, first sequence applying to `a`,
+ second to `b`. Both elements array_like must be of the same length.
+
+ See Also
+ --------
+ dot, einsum
+
+ Notes
+ -----
+ Three common use cases are:
+
+ * ``axes = 0`` : tensor product :math:`a\\otimes b`
+ * ``axes = 1`` : tensor dot product :math:`a\\cdot b`
+ * ``axes = 2`` : (default) tensor double contraction :math:`a:b`
+
+ When `axes` is integer_like, the sequence for evaluation will be: first
+ the -Nth axis in `a` and 0th axis in `b`, and the -1th axis in `a` and
+ Nth axis in `b` last.
+
+ When there is more than one axis to sum over - and they are not the last
+ (first) axes of `a` (`b`) - the argument `axes` should consist of
+ two sequences of the same length, with the first axis to sum over given
+ first in both sequences, the second axis second, and so forth.
+
+ Examples
+ --------
+ A "traditional" example:
+
+ >>> a = np.arange(60.).reshape(3,4,5)
+ >>> b = np.arange(24.).reshape(4,3,2)
+ >>> c = np.tensordot(a,b, axes=([1,0],[0,1]))
+ >>> c.shape
+ (5, 2)
+ >>> c
+ array([[ 4400., 4730.],
+ [ 4532., 4874.],
+ [ 4664., 5018.],
+ [ 4796., 5162.],
+ [ 4928., 5306.]])
+ >>> # A slower but equivalent way of computing the same...
+ >>> d = np.zeros((5,2))
+ >>> for i in range(5):
+ ... for j in range(2):
+ ... for k in range(3):
+ ... for n in range(4):
+ ... d[i,j] += a[k,n,i] * b[n,k,j]
+ >>> c == d
+ array([[ True, True],
+ [ True, True],
+ [ True, True],
+ [ True, True],
+ [ True, True]])
+
+ An extended example taking advantage of the overloading of + and \\*:
+
+ >>> a = np.array(range(1, 9))
+ >>> a.shape = (2, 2, 2)
+ >>> A = np.array(('a', 'b', 'c', 'd'), dtype=object)
+ >>> A.shape = (2, 2)
+ >>> a; A
+ array([[[1, 2],
+ [3, 4]],
+ [[5, 6],
+ [7, 8]]])
+ array([[a, b],
+ [c, d]], dtype=object)
+
+ >>> np.tensordot(a, A) # third argument default is 2 for double-contraction
+ array([abbcccdddd, aaaaabbbbbbcccccccdddddddd], dtype=object)
+
+ >>> np.tensordot(a, A, 1)
+ array([[[acc, bdd],
+ [aaacccc, bbbdddd]],
+ [[aaaaacccccc, bbbbbdddddd],
+ [aaaaaaacccccccc, bbbbbbbdddddddd]]], dtype=object)
+
+ >>> np.tensordot(a, A, 0) # tensor product (result too long to incl.)
+ array([[[[[a, b],
+ [c, d]],
+ ...
+
+ >>> np.tensordot(a, A, (0, 1))
+ array([[[abbbbb, cddddd],
+ [aabbbbbb, ccdddddd]],
+ [[aaabbbbbbb, cccddddddd],
+ [aaaabbbbbbbb, ccccdddddddd]]], dtype=object)
+
+ >>> np.tensordot(a, A, (2, 1))
+ array([[[abb, cdd],
+ [aaabbbb, cccdddd]],
+ [[aaaaabbbbbb, cccccdddddd],
+ [aaaaaaabbbbbbbb, cccccccdddddddd]]], dtype=object)
+
+ >>> np.tensordot(a, A, ((0, 1), (0, 1)))
+ array([abbbcccccddddddd, aabbbbccccccdddddddd], dtype=object)
+
+ >>> np.tensordot(a, A, ((2, 1), (1, 0)))
+ array([acccbbdddd, aaaaacccccccbbbbbbdddddddd], dtype=object)
+
+ """
+ try:
+ iter(axes)
+ except Exception:
+ axes_a = list(range(-axes, 0))
+ axes_b = list(range(0, axes))
+ else:
+ axes_a, axes_b = axes
+ try:
+ na = len(axes_a)
+ axes_a = list(axes_a)
+ except TypeError:
+ axes_a = [axes_a]
+ na = 1
+ try:
+ nb = len(axes_b)
+ axes_b = list(axes_b)
+ except TypeError:
+ axes_b = [axes_b]
+ nb = 1
+
+ a, b = asarray(a), asarray(b)
+ as_ = a.shape
+ nda = a.ndim
+ bs = b.shape
+ ndb = b.ndim
+ equal = True
+ if na != nb:
+ equal = False
+ else:
+ for k in range(na):
+ if as_[axes_a[k]] != bs[axes_b[k]]:
+ equal = False
+ break
+ if axes_a[k] < 0:
+ axes_a[k] += nda
+ if axes_b[k] < 0:
+ axes_b[k] += ndb
+ if not equal:
+ raise ValueError("shape-mismatch for sum")
+
+ # Move the axes to sum over to the end of "a"
+ # and to the front of "b"
+ notin = [k for k in range(nda) if k not in axes_a]
+ newaxes_a = notin + axes_a
+ N2 = 1
+ for axis in axes_a:
+ N2 *= as_[axis]
+ newshape_a = (int(multiply.reduce([as_[ax] for ax in notin])), N2)
+ olda = [as_[axis] for axis in notin]
+
+ notin = [k for k in range(ndb) if k not in axes_b]
+ newaxes_b = axes_b + notin
+ N2 = 1
+ for axis in axes_b:
+ N2 *= bs[axis]
+ newshape_b = (N2, int(multiply.reduce([bs[ax] for ax in notin])))
+ oldb = [bs[axis] for axis in notin]
+
+ at = a.transpose(newaxes_a).reshape(newshape_a)
+ bt = b.transpose(newaxes_b).reshape(newshape_b)
+ res = dot(at, bt)
+ return res.reshape(olda + oldb)
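+
+# REPL sketch (illustrative): tensordot reduces to a single matrix product
+# once the summed axes are moved to the end of `a` and the front of `b`.
+#
+#     >>> a = np.arange(60.).reshape(3, 4, 5)
+#     >>> b = np.arange(24.).reshape(4, 3, 2)
+#     >>> at = a.transpose([2, 1, 0]).reshape(5, 12)  # free axis, then summed axes
+#     >>> bt = b.reshape(12, 2)                       # summed axes, then free axis
+#     >>> np.allclose(np.dot(at, bt), np.tensordot(a, b, axes=([1, 0], [0, 1])))
+#     True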
+
+
+def _roll_dispatcher(a, shift, axis=None):
+ return (a,)
+
+
+@array_function_dispatch(_roll_dispatcher)
+def roll(a, shift, axis=None):
+ """
+ Roll array elements along a given axis.
+
+ Elements that roll beyond the last position are re-introduced at
+ the first.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ shift : int or tuple of ints
+ The number of places by which elements are shifted. If a tuple,
+ then `axis` must be a tuple of the same size, and each of the
+ given axes is shifted by the corresponding number. If an int
+ while `axis` is a tuple of ints, then the same value is used for
+ all given axes.
+ axis : int or tuple of ints, optional
+ Axis or axes along which elements are shifted. By default, the
+ array is flattened before shifting, after which the original
+ shape is restored.
+
+ Returns
+ -------
+ res : ndarray
+ Output array, with the same shape as `a`.
+
+ See Also
+ --------
+ rollaxis : Roll the specified axis backwards, until it lies in a
+ given position.
+
+ Notes
+ -----
+ .. versionadded:: 1.12.0
+
+ Supports rolling over multiple dimensions simultaneously.
+
+ Examples
+ --------
+ >>> x = np.arange(10)
+ >>> np.roll(x, 2)
+ array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])
+
+ >>> x2 = np.reshape(x, (2,5))
+ >>> x2
+ array([[0, 1, 2, 3, 4],
+ [5, 6, 7, 8, 9]])
+ >>> np.roll(x2, 1)
+ array([[9, 0, 1, 2, 3],
+ [4, 5, 6, 7, 8]])
+ >>> np.roll(x2, 1, axis=0)
+ array([[5, 6, 7, 8, 9],
+ [0, 1, 2, 3, 4]])
+ >>> np.roll(x2, 1, axis=1)
+ array([[4, 0, 1, 2, 3],
+ [9, 5, 6, 7, 8]])
+
+ """
+ a = asanyarray(a)
+ if axis is None:
+ return roll(a.ravel(), shift, 0).reshape(a.shape)
+
+ else:
+ axis = normalize_axis_tuple(axis, a.ndim, allow_duplicate=True)
+ broadcasted = broadcast(shift, axis)
+ if broadcasted.ndim > 1:
+ raise ValueError(
+ "'shift' and 'axis' should be scalars or 1D sequences")
+ shifts = {ax: 0 for ax in range(a.ndim)}
+ for sh, ax in broadcasted:
+ shifts[ax] += sh
+
+ rolls = [((slice(None), slice(None)),)] * a.ndim
+ for ax, offset in shifts.items():
+ offset %= a.shape[ax] or 1 # If `a` is empty, nothing matters.
+ if offset:
+ # (original, result), (original, result)
+ rolls[ax] = ((slice(None, -offset), slice(offset, None)),
+ (slice(-offset, None), slice(None, offset)))
+
+ result = empty_like(a)
+ for indices in itertools.product(*rolls):
+ arr_index, res_index = zip(*indices)
+ result[res_index] = a[arr_index]
+
+ return result
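+
+# REPL sketch (illustrative): for a 1-D array the two slice pairs built above
+# amount to concatenating the tail in front of the head.
+#
+#     >>> x = np.arange(10)
+#     >>> np.array_equal(np.roll(x, 2), np.concatenate((x[-2:], x[:-2])))
+#     True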
+
+
+def _rollaxis_dispatcher(a, axis, start=None):
+ return (a,)
+
+
+@array_function_dispatch(_rollaxis_dispatcher)
+def rollaxis(a, axis, start=0):
+ """
+ Roll the specified axis backwards, until it lies in a given position.
+
+ This function continues to be supported for backward compatibility, but you
+ should prefer `moveaxis`. The `moveaxis` function was added in NumPy
+ 1.11.
+
+ Parameters
+ ----------
+ a : ndarray
+ Input array.
+ axis : int
+ The axis to roll backwards. The positions of the other axes do not
+ change relative to one another.
+ start : int, optional
+ The axis is rolled until it lies before this position. The default,
+ 0, results in a "complete" roll.
+
+ Returns
+ -------
+ res : ndarray
+ For NumPy >= 1.10.0 a view of `a` is always returned. For earlier
+ NumPy versions a view of `a` is returned only if the order of the
+ axes is changed, otherwise the input array is returned.
+
+ See Also
+ --------
+ moveaxis : Move array axes to new positions.
+ roll : Roll the elements of an array by a number of positions along a
+ given axis.
+
+ Examples
+ --------
+ >>> a = np.ones((3,4,5,6))
+ >>> np.rollaxis(a, 3, 1).shape
+ (3, 6, 4, 5)
+ >>> np.rollaxis(a, 2).shape
+ (5, 3, 4, 6)
+ >>> np.rollaxis(a, 1, 4).shape
+ (3, 5, 6, 4)
+
+ """
+ n = a.ndim
+ axis = normalize_axis_index(axis, n)
+ if start < 0:
+ start += n
+ msg = "'%s' arg requires %d <= %s < %d, but %d was passed in"
+ if not (0 <= start < n + 1):
+ raise AxisError(msg % ('start', -n, 'start', n + 1, start))
+ if axis < start:
+ # it's been removed
+ start -= 1
+ if axis == start:
+ return a[...]
+ axes = list(range(0, n))
+ axes.remove(axis)
+ axes.insert(start, axis)
+ return a.transpose(axes)
+
+
+def normalize_axis_tuple(axis, ndim, argname=None, allow_duplicate=False):
+ """
+ Normalizes an axis argument into a tuple of non-negative integer axes.
+
+ This handles shorthands such as ``1`` and converts them to ``(1,)``,
+ as well as performing the handling of negative indices covered by
+ `normalize_axis_index`.
+
+ By default, this forbids axes from being specified multiple times.
+
+ Used internally by multi-axis-checking logic.
+
+ .. versionadded:: 1.13.0
+
+ Parameters
+ ----------
+ axis : int, iterable of int
+ The un-normalized index or indices of the axis.
+ ndim : int
+ The number of dimensions of the array that `axis` should be normalized
+ against.
+ argname : str, optional
+ A prefix to put before the error message, typically the name of the
+ argument.
+ allow_duplicate : bool, optional
+ If False, the default, disallow an axis from being specified twice.
+
+ Returns
+ -------
+ normalized_axes : tuple of int
+ The normalized axis index, such that `0 <= normalized_axis < ndim`
+
+ Raises
+ ------
+ AxisError
+ If any axis provided is out of range
+ ValueError
+ If an axis is repeated
+
+ See also
+ --------
+ normalize_axis_index : normalizing a single scalar axis
+ """
+ # Optimization to speed-up the most common cases.
+ if type(axis) not in (tuple, list):
+ try:
+ axis = [operator.index(axis)]
+ except TypeError:
+ pass
+ # Going via an iterator directly is slower than via list comprehension.
+ axis = tuple([normalize_axis_index(ax, ndim, argname) for ax in axis])
+ if not allow_duplicate and len(set(axis)) != len(axis):
+ if argname:
+ raise ValueError('repeated axis in `{}` argument'.format(argname))
+ else:
+ raise ValueError('repeated axis')
+ return axis
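+
+# REPL sketch (illustrative, using the module-level helper defined above):
+# a scalar axis becomes a 1-tuple and negative values wrap, so axis=-1 on a
+# 3-d array normalizes to (2,).
+#
+#     >>> normalize_axis_tuple(-1, 3)
+#     (2,)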
+
+
+def _moveaxis_dispatcher(a, source, destination):
+ return (a,)
+
+
+@array_function_dispatch(_moveaxis_dispatcher)
+def moveaxis(a, source, destination):
+ """
+ Move axes of an array to new positions.
+
+ Other axes remain in their original order.
+
+ .. versionadded:: 1.11.0
+
+ Parameters
+ ----------
+ a : np.ndarray
+ The array whose axes should be reordered.
+ source : int or sequence of int
+ Original positions of the axes to move. These must be unique.
+ destination : int or sequence of int
+ Destination positions for each of the original axes. These must also be
+ unique.
+
+ Returns
+ -------
+ result : np.ndarray
+ Array with moved axes. This array is a view of the input array.
+
+ See Also
+ --------
+ transpose: Permute the dimensions of an array.
+ swapaxes: Interchange two axes of an array.
+
+ Examples
+ --------
+
+ >>> x = np.zeros((3, 4, 5))
+ >>> np.moveaxis(x, 0, -1).shape
+ (4, 5, 3)
+ >>> np.moveaxis(x, -1, 0).shape
+ (5, 3, 4)
+
+ These all achieve the same result:
+
+ >>> np.transpose(x).shape
+ (5, 4, 3)
+ >>> np.swapaxes(x, 0, -1).shape
+ (5, 4, 3)
+ >>> np.moveaxis(x, [0, 1], [-1, -2]).shape
+ (5, 4, 3)
+ >>> np.moveaxis(x, [0, 1, 2], [-1, -2, -3]).shape
+ (5, 4, 3)
+
+ """
+ try:
+ # allow duck-array types if they define transpose
+ transpose = a.transpose
+ except AttributeError:
+ a = asarray(a)
+ transpose = a.transpose
+
+ source = normalize_axis_tuple(source, a.ndim, 'source')
+ destination = normalize_axis_tuple(destination, a.ndim, 'destination')
+ if len(source) != len(destination):
+ raise ValueError('`source` and `destination` arguments must have '
+ 'the same number of elements')
+
+ order = [n for n in range(a.ndim) if n not in source]
+
+ for dest, src in sorted(zip(destination, source)):
+ order.insert(dest, src)
+
+ result = transpose(order)
+ return result
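+
+# REPL sketch (illustrative): moveaxis only builds a permutation -- drop the
+# source axes, then insert each at its destination; 0 -> -1 gives [1, 2, 0].
+#
+#     >>> x = np.zeros((3, 4, 5))
+#     >>> np.moveaxis(x, 0, -1).shape == x.transpose([1, 2, 0]).shape
+#     True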
+
+
+# fix hack in scipy which imports this function
+def _move_axis_to_0(a, axis):
+ return moveaxis(a, axis, 0)
+
+
+def _cross_dispatcher(a, b, axisa=None, axisb=None, axisc=None, axis=None):
+ return (a, b)
+
+
+@array_function_dispatch(_cross_dispatcher)
+def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
+ """
+ Return the cross product of two (arrays of) vectors.
+
+ The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular
+ to both `a` and `b`. If `a` and `b` are arrays of vectors, the vectors
+ are defined by the last axis of `a` and `b` by default, and these axes
+ can have dimensions 2 or 3. Where the dimension of either `a` or `b` is
+ 2, the third component of the input vector is assumed to be zero and the
+ cross product calculated accordingly. In cases where both input vectors
+ have dimension 2, the z-component of the cross product is returned.
+
+ Parameters
+ ----------
+ a : array_like
+ Components of the first vector(s).
+ b : array_like
+ Components of the second vector(s).
+ axisa : int, optional
+ Axis of `a` that defines the vector(s). By default, the last axis.
+ axisb : int, optional
+ Axis of `b` that defines the vector(s). By default, the last axis.
+ axisc : int, optional
+ Axis of `c` containing the cross product vector(s). Ignored if
+ both input vectors have dimension 2, as the return is scalar.
+ By default, the last axis.
+ axis : int, optional
+ If defined, the axis of `a`, `b` and `c` that defines the vector(s)
+ and cross product(s). Overrides `axisa`, `axisb` and `axisc`.
+
+ Returns
+ -------
+ c : ndarray
+ Vector cross product(s).
+
+ Raises
+ ------
+ ValueError
+ When the dimension of the vector(s) in `a` and/or `b` does not
+ equal 2 or 3.
+
+ See Also
+ --------
+ inner : Inner product
+ outer : Outer product.
+ ix_ : Construct index arrays.
+
+ Notes
+ -----
+ .. versionadded:: 1.9.0
+
+ Supports full broadcasting of the inputs.
+
+ Examples
+ --------
+ Vector cross-product.
+
+ >>> x = [1, 2, 3]
+ >>> y = [4, 5, 6]
+ >>> np.cross(x, y)
+ array([-3, 6, -3])
+
+ One vector with dimension 2.
+
+ >>> x = [1, 2]
+ >>> y = [4, 5, 6]
+ >>> np.cross(x, y)
+ array([12, -6, -3])
+
+ Equivalently:
+
+ >>> x = [1, 2, 0]
+ >>> y = [4, 5, 6]
+ >>> np.cross(x, y)
+ array([12, -6, -3])
+
+ Both vectors with dimension 2.
+
+ >>> x = [1,2]
+ >>> y = [4,5]
+ >>> np.cross(x, y)
+ -3
+
+ Multiple vector cross-products. Note that the direction of the cross
+ product vector is defined by the `right-hand rule`.
+
+ >>> x = np.array([[1,2,3], [4,5,6]])
+ >>> y = np.array([[4,5,6], [1,2,3]])
+ >>> np.cross(x, y)
+ array([[-3, 6, -3],
+ [ 3, -6, 3]])
+
+ The orientation of `c` can be changed using the `axisc` keyword.
+
+ >>> np.cross(x, y, axisc=0)
+ array([[-3, 3],
+ [ 6, -6],
+ [-3, 3]])
+
+ Change the vector definition of `x` and `y` using `axisa` and `axisb`.
+
+ >>> x = np.array([[1,2,3], [4,5,6], [7, 8, 9]])
+ >>> y = np.array([[7, 8, 9], [4,5,6], [1,2,3]])
+ >>> np.cross(x, y)
+ array([[ -6, 12, -6],
+ [ 0, 0, 0],
+ [ 6, -12, 6]])
+ >>> np.cross(x, y, axisa=0, axisb=0)
+ array([[-24, 48, -24],
+ [-30, 60, -30],
+ [-36, 72, -36]])
+
+ """
+ if axis is not None:
+ axisa, axisb, axisc = (axis,) * 3
+ a = asarray(a)
+ b = asarray(b)
+ # Check axisa and axisb are within bounds
+ axisa = normalize_axis_index(axisa, a.ndim, msg_prefix='axisa')
+ axisb = normalize_axis_index(axisb, b.ndim, msg_prefix='axisb')
+
+ # Move working axis to the end of the shape
+ a = moveaxis(a, axisa, -1)
+ b = moveaxis(b, axisb, -1)
+ msg = ("incompatible dimensions for cross product\n"
+ "(dimension must be 2 or 3)")
+ if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3):
+ raise ValueError(msg)
+
+ # Create the output array
+ shape = broadcast(a[..., 0], b[..., 0]).shape
+ if a.shape[-1] == 3 or b.shape[-1] == 3:
+ shape += (3,)
+ # Check axisc is within bounds
+ axisc = normalize_axis_index(axisc, len(shape), msg_prefix='axisc')
+ dtype = promote_types(a.dtype, b.dtype)
+ cp = empty(shape, dtype)
+
+ # create local aliases for readability
+ a0 = a[..., 0]
+ a1 = a[..., 1]
+ if a.shape[-1] == 3:
+ a2 = a[..., 2]
+ b0 = b[..., 0]
+ b1 = b[..., 1]
+ if b.shape[-1] == 3:
+ b2 = b[..., 2]
+ if cp.ndim != 0 and cp.shape[-1] == 3:
+ cp0 = cp[..., 0]
+ cp1 = cp[..., 1]
+ cp2 = cp[..., 2]
+
+ if a.shape[-1] == 2:
+ if b.shape[-1] == 2:
+ # a0 * b1 - a1 * b0
+ multiply(a0, b1, out=cp)
+ cp -= a1 * b0
+ return cp
+ else:
+ assert b.shape[-1] == 3
+ # cp0 = a1 * b2 - 0 (a2 = 0)
+ # cp1 = 0 - a0 * b2 (a2 = 0)
+ # cp2 = a0 * b1 - a1 * b0
+ multiply(a1, b2, out=cp0)
+ multiply(a0, b2, out=cp1)
+ negative(cp1, out=cp1)
+ multiply(a0, b1, out=cp2)
+ cp2 -= a1 * b0
+ else:
+ assert a.shape[-1] == 3
+ if b.shape[-1] == 3:
+ # cp0 = a1 * b2 - a2 * b1
+ # cp1 = a2 * b0 - a0 * b2
+ # cp2 = a0 * b1 - a1 * b0
+ multiply(a1, b2, out=cp0)
+ tmp = array(a2 * b1)
+ cp0 -= tmp
+ multiply(a2, b0, out=cp1)
+ multiply(a0, b2, out=tmp)
+ cp1 -= tmp
+ multiply(a0, b1, out=cp2)
+ multiply(a1, b0, out=tmp)
+ cp2 -= tmp
+ else:
+ assert b.shape[-1] == 2
+ # cp0 = 0 - a2 * b1 (b2 = 0)
+ # cp1 = a2 * b0 - 0 (b2 = 0)
+ # cp2 = a0 * b1 - a1 * b0
+ multiply(a2, b1, out=cp0)
+ negative(cp0, out=cp0)
+ multiply(a2, b0, out=cp1)
+ multiply(a0, b1, out=cp2)
+ cp2 -= a1 * b0
+
+ return moveaxis(cp, -1, axisc)
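+
+# REPL sketch (illustrative): the 3-d branch above computes the familiar
+# component formulas, checked here against np.cross.
+#
+#     >>> u, w = np.array([1., 2., 3.]), np.array([4., 5., 6.])
+#     >>> np.array_equal(np.cross(u, w), [u[1]*w[2] - u[2]*w[1],
+#     ...                                 u[2]*w[0] - u[0]*w[2],
+#     ...                                 u[0]*w[1] - u[1]*w[0]])
+#     True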
+
+
+little_endian = (sys.byteorder == 'little')
+
+
+@set_module('numpy')
+def indices(dimensions, dtype=int):
+ """
+ Return an array representing the indices of a grid.
+
+ Compute an array where the subarrays contain index values 0,1,...
+ varying only along the corresponding axis.
+
+ Parameters
+ ----------
+ dimensions : sequence of ints
+ The shape of the grid.
+ dtype : dtype, optional
+ Data type of the result.
+
+ Returns
+ -------
+ grid : ndarray
+ The array of grid indices,
+ ``grid.shape = (len(dimensions),) + tuple(dimensions)``.
+
+ See Also
+ --------
+ mgrid, meshgrid
+
+ Notes
+ -----
+ The output shape is obtained by prepending the number of dimensions
+ in front of the tuple of dimensions, i.e. if `dimensions` is a tuple
+ ``(r0, ..., rN-1)`` of length ``N``, the output shape is
+ ``(N,r0,...,rN-1)``.
+
+ The subarray ``grid[k]`` contains the N-D array of indices along the
+ ``k-th`` axis. Explicitly::
+
+ grid[k,i0,i1,...,iN-1] = ik
+
+ Examples
+ --------
+ >>> grid = np.indices((2, 3))
+ >>> grid.shape
+ (2, 2, 3)
+ >>> grid[0] # row indices
+ array([[0, 0, 0],
+ [1, 1, 1]])
+ >>> grid[1] # column indices
+ array([[0, 1, 2],
+ [0, 1, 2]])
+
+ The indices can be used as an index into an array.
+
+ >>> x = np.arange(20).reshape(5, 4)
+ >>> row, col = np.indices((2, 3))
+ >>> x[row, col]
+ array([[0, 1, 2],
+ [4, 5, 6]])
+
+ Note that it would be more straightforward in the above example to
+ extract the required elements directly with ``x[:2, :3]``.
+
+ """
+ dimensions = tuple(dimensions)
+ N = len(dimensions)
+ shape = (1,)*N
+ res = empty((N,)+dimensions, dtype=dtype)
+ for i, dim in enumerate(dimensions):
+ res[i] = arange(dim, dtype=dtype).reshape(
+ shape[:i] + (dim,) + shape[i+1:]
+ )
+ return res
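+
+# REPL sketch (illustrative): each res[i] is arange(dim) reshaped so that it
+# varies only along axis i, then broadcast across the remaining axes.
+#
+#     >>> np.array_equal(np.indices((2, 3))[1],
+#     ...                np.broadcast_to(np.arange(3).reshape(1, 3), (2, 3)))
+#     True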
+
+
+@set_module('numpy')
+def fromfunction(function, shape, **kwargs):
+ """
+ Construct an array by executing a function over each coordinate.
+
+ The resulting array therefore has a value ``fn(x, y, z)`` at
+ coordinate ``(x, y, z)``.
+
+ Parameters
+ ----------
+ function : callable
+ The function is called with N parameters, where N is the rank of
+ `shape`. Each parameter represents the coordinates of the array
+ varying along a specific axis. For example, if `shape`
+ were ``(2, 2)``, then the parameters would be
+ ``array([[0, 0], [1, 1]])`` and ``array([[0, 1], [0, 1]])``.
+ shape : (N,) tuple of ints
+ Shape of the output array, which also determines the shape of
+ the coordinate arrays passed to `function`.
+ dtype : data-type, optional
+ Data-type of the coordinate arrays passed to `function`.
+ By default, `dtype` is float.
+
+ Returns
+ -------
+ fromfunction : any
+ The result of the call to `function` is passed back directly.
+ Therefore the shape of `fromfunction` is completely determined by
+ `function`. If `function` returns a scalar value, the shape of
+ `fromfunction` would not match the `shape` parameter.
+
+ See Also
+ --------
+ indices, meshgrid
+
+ Notes
+ -----
+ Keywords other than `dtype` are passed to `function`.
+
+ Examples
+ --------
+ >>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=int)
+ array([[ True, False, False],
+ [False, True, False],
+ [False, False, True]])
+
+ >>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int)
+ array([[0, 1, 2],
+ [1, 2, 3],
+ [2, 3, 4]])
+
+ """
+ dtype = kwargs.pop('dtype', float)
+ args = indices(shape, dtype=dtype)
+ return function(*args, **kwargs)
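+
+# REPL sketch (illustrative): fromfunction is a single call of `function` on
+# the coordinate grids produced by np.indices.
+#
+#     >>> i, j = np.indices((3, 3))
+#     >>> np.array_equal(np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int),
+#     ...                i + j)
+#     True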
+
+
+def _frombuffer(buf, dtype, shape, order):
+ return frombuffer(buf, dtype=dtype).reshape(shape, order=order)
+
+
+@set_module('numpy')
+def isscalar(num):
+ """
+ Returns True if the type of `num` is a scalar type.
+
+ Parameters
+ ----------
+ num : any
+ Input argument, can be of any type and shape.
+
+ Returns
+ -------
+ val : bool
+ True if `num` is a scalar type, False if it is not.
+
+ See Also
+ --------
+ ndim : Get the number of dimensions of an array
+
+ Notes
+ -----
+ In almost all cases ``np.ndim(x) == 0`` should be used instead of this
+ function, as that will also return True for 0d arrays. This is how
+ numpy overloads functions in the style of the ``dx`` arguments to `gradient`
+ and the ``bins`` argument to `histogram`. Some key differences:
+
+ +--------------------------------------+---------------+-------------------+
+ | x |``isscalar(x)``|``np.ndim(x) == 0``|
+ +======================================+===============+===================+
+ | PEP 3141 numeric objects (including | ``True`` | ``True`` |
+ | builtins) | | |
+ +--------------------------------------+---------------+-------------------+
+ | builtin string and buffer objects | ``True`` | ``True`` |
+ +--------------------------------------+---------------+-------------------+
+ | other builtin objects, like | ``False`` | ``True`` |
+ | `pathlib.Path`, `Exception`, | | |
+ | the result of `re.compile` | | |
+ +--------------------------------------+---------------+-------------------+
+ | third-party objects like | ``False`` | ``True`` |
+ | `matplotlib.figure.Figure` | | |
+ +--------------------------------------+---------------+-------------------+
+ | zero-dimensional numpy arrays | ``False`` | ``True`` |
+ +--------------------------------------+---------------+-------------------+
+ | other numpy arrays | ``False`` | ``False`` |
+ +--------------------------------------+---------------+-------------------+
+ | `list`, `tuple`, and other sequence | ``False`` | ``False`` |
+ | objects | | |
+ +--------------------------------------+---------------+-------------------+
+
+ Examples
+ --------
+ >>> np.isscalar(3.1)
+ True
+ >>> np.isscalar(np.array(3.1))
+ False
+ >>> np.isscalar([3.1])
+ False
+ >>> np.isscalar(False)
+ True
+ >>> np.isscalar('numpy')
+ True
+
+ NumPy supports PEP 3141 numbers:
+
+ >>> from fractions import Fraction
+ >>> np.isscalar(Fraction(5, 17))
+ True
+ >>> from numbers import Number
+ >>> np.isscalar(Number())
+ True
+
+ """
+ return (isinstance(num, generic)
+ or type(num) in ScalarType
+ or isinstance(num, numbers.Number))
+
+
+@set_module('numpy')
+def binary_repr(num, width=None):
+ """
+ Return the binary representation of the input number as a string.
+
+ For negative numbers, if width is not given, a minus sign is added to the
+ front. If width is given, the two's complement of the number is
+ returned, with respect to that width.
+
+ In a two's-complement system negative numbers are represented by the two's
+ complement of the absolute value. This is the most common method of
+ representing signed integers on computers [1]_. An N-bit two's-complement
+ system can represent every integer in the range
+ :math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
+
+ Parameters
+ ----------
+ num : int
+ Only an integer decimal number can be used.
+ width : int, optional
+ The length of the returned string if `num` is positive, or the length
+ of the two's complement if `num` is negative, provided that `width` is
+ at least a sufficient number of bits for `num` to be represented in the
+ designated form.
+
+ If the `width` value is insufficient, it will be ignored, and `num` will
+ be returned in binary (`num` > 0) or two's complement (`num` < 0) form
+ with its width equal to the minimum number of bits needed to represent
+ the number in the designated form. This behavior is deprecated and will
+ later raise an error.
+
+ .. deprecated:: 1.12.0
+
+ Returns
+ -------
+ bin : str
+ Binary representation of `num` or two's complement of `num`.
+
+ See Also
+ --------
+ base_repr: Return a string representation of a number in the given base
+ system.
+ bin: Python's built-in binary representation generator of an integer.
+
+ Notes
+ -----
+ `binary_repr` is equivalent to using `base_repr` with base 2, but about 25x
+ faster.
+
+ References
+ ----------
+ .. [1] Wikipedia, "Two's complement",
+ https://en.wikipedia.org/wiki/Two's_complement
+
+ Examples
+ --------
+ >>> np.binary_repr(3)
+ '11'
+ >>> np.binary_repr(-3)
+ '-11'
+ >>> np.binary_repr(3, width=4)
+ '0011'
+
+ The two's complement is returned when the input number is negative and
+ width is specified:
+
+ >>> np.binary_repr(-3, width=3)
+ '101'
+ >>> np.binary_repr(-3, width=5)
+ '11101'
+
+ """
+ def warn_if_insufficient(width, binwidth):
+ if width is not None and width < binwidth:
+ warnings.warn(
+ "Insufficient bit width provided. This behavior "
+ "will raise an error in the future.", DeprecationWarning,
+ stacklevel=3)
+
+ if num == 0:
+ return '0' * (width or 1)
+
+ elif num > 0:
+ binary = bin(num)[2:]
+ binwidth = len(binary)
+ outwidth = (binwidth if width is None
+ else max(binwidth, width))
+ warn_if_insufficient(width, binwidth)
+ return binary.zfill(outwidth)
+
+ else:
+ if width is None:
+ return '-' + bin(-num)[2:]
+
+ else:
+ poswidth = len(bin(-num)[2:])
+
+ # See gh-8679: remove extra digit
+ # for numbers at boundaries.
+ if 2**(poswidth - 1) == -num:
+ poswidth -= 1
+
+ twocomp = 2**(poswidth + 1) + num
+ binary = bin(twocomp)[2:]
+ binwidth = len(binary)
+
+ outwidth = max(binwidth, width)
+ warn_if_insufficient(width, binwidth)
+ return '1' * (outwidth - binwidth) + binary
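+
+# Worked example of the negative branch above, for num = -3 and width = 5:
+# poswidth = len('11') = 2, twocomp = 2**(2 + 1) + (-3) = 5 -> '101', and the
+# result is padded on the left with ones to width 5, giving '11101'.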
+
+
+@set_module('numpy')
+def base_repr(number, base=2, padding=0):
+ """
+ Return a string representation of a number in the given base system.
+
+ Parameters
+ ----------
+ number : int
+ The value to convert. Positive and negative values are handled.
+ base : int, optional
+ Convert `number` to the `base` number system. The valid range is 2-36,
+ the default value is 2.
+ padding : int, optional
+ Number of zeros padded on the left. Default is 0 (no padding).
+
+ Returns
+ -------
+ out : str
+ String representation of `number` in `base` system.
+
+ See Also
+ --------
+ binary_repr : Faster version of `base_repr` for base 2.
+
+ Examples
+ --------
+ >>> np.base_repr(5)
+ '101'
+ >>> np.base_repr(6, 5)
+ '11'
+ >>> np.base_repr(7, base=5, padding=3)
+ '00012'
+
+ >>> np.base_repr(10, base=16)
+ 'A'
+ >>> np.base_repr(32, base=16)
+ '20'
+
+ """
+ digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+ if base > len(digits):
+ raise ValueError("Bases greater than 36 not handled in base_repr.")
+ elif base < 2:
+ raise ValueError("Bases less than 2 not handled in base_repr.")
+
+ num = abs(number)
+ res = []
+ while num:
+ res.append(digits[num % base])
+ num //= base
+ if padding:
+ res.append('0' * padding)
+ if number < 0:
+ res.append('-')
+ return ''.join(reversed(res or '0'))
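+
+# Worked example of the digit loop above, for base_repr(7, base=5): 7 % 5 = 2,
+# then 1 % 5 = 1, so res collects ['2', '1'] least-significant digit first;
+# reversing and joining yields '12'.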
+
+
+def load(file):
+ """
+ Wrapper around cPickle.load which accepts either a file-like object or
+ a filename.
+
+ Note that the NumPy binary format is not based on pickle/cPickle anymore.
+ For details on the preferred way of loading and saving files, see `load`
+ and `save`.
+
+ See Also
+ --------
+ load, save
+
+ """
+ # NumPy 1.15.0, 2017-12-10
+ warnings.warn(
+ "np.core.numeric.load is deprecated, use pickle.load instead",
+ DeprecationWarning, stacklevel=2)
+ if isinstance(file, type("")):
+ file = open(file, "rb")
+ return pickle.load(file)
+
+
+# These are all essentially abbreviations
+# These might wind up in a special abbreviations module
+
+
+def _maketup(descr, val):
+ dt = dtype(descr)
+ # Place val in all scalar tuples:
+ fields = dt.fields
+ if fields is None:
+ return val
+ else:
+ res = [_maketup(fields[name][0], val) for name in dt.names]
+ return tuple(res)
+
+
+@set_module('numpy')
+def identity(n, dtype=None):
+ """
+ Return the identity array.
+
+ The identity array is a square array with ones on
+ the main diagonal.
+
+ Parameters
+ ----------
+ n : int
+ Number of rows (and columns) in `n` x `n` output.
+ dtype : data-type, optional
+ Data-type of the output. Defaults to ``float``.
+
+ Returns
+ -------
+ out : ndarray
+ `n` x `n` array with its main diagonal set to one,
+ and all other elements 0.
+
+ Examples
+ --------
+ >>> np.identity(3)
+ array([[ 1., 0., 0.],
+ [ 0., 1., 0.],
+ [ 0., 0., 1.]])
+
+ """
+ from numpy import eye
+ return eye(n, dtype=dtype)
+
+
+def _allclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None):
+ return (a, b)
+
+
+@array_function_dispatch(_allclose_dispatcher)
+def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
+ """
+ Returns True if two arrays are element-wise equal within a tolerance.
+
+ The tolerance values are positive, typically very small numbers. The
+ relative difference (`rtol` * abs(`b`)) and the absolute difference
+ `atol` are added together to compare against the absolute difference
+ between `a` and `b`.
+
+ If either array contains one or more NaNs, False is returned.
+ Infs are treated as equal if they are in the same place and of the same
+ sign in both arrays.
+
+ Parameters
+ ----------
+ a, b : array_like
+ Input arrays to compare.
+ rtol : float
+ The relative tolerance parameter (see Notes).
+ atol : float
+ The absolute tolerance parameter (see Notes).
+ equal_nan : bool
+ Whether to compare NaN's as equal. If True, NaN's in `a` will be
+ considered equal to NaN's in `b` in the output array.
+
+ .. versionadded:: 1.10.0
+
+ Returns
+ -------
+ allclose : bool
+ Returns True if the two arrays are equal within the given
+ tolerance; False otherwise.
+
+ See Also
+ --------
+ isclose, all, any, equal
+
+ Notes
+ -----
+ If the following equation is element-wise True, then allclose returns
+ True.
+
+ absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
+
+ The above equation is not symmetric in `a` and `b`, so that
+ ``allclose(a, b)`` might be different from ``allclose(b, a)`` in
+ some rare cases.
+
+ The comparison of `a` and `b` uses standard broadcasting, which
+ means that `a` and `b` need not have the same shape in order for
+ ``allclose(a, b)`` to evaluate to True. The same is true for
+ `equal` but not `array_equal`.
+
+ Examples
+ --------
+ >>> np.allclose([1e10,1e-7], [1.00001e10,1e-8])
+ False
+ >>> np.allclose([1e10,1e-8], [1.00001e10,1e-9])
+ True
+ >>> np.allclose([1e10,1e-8], [1.0001e10,1e-9])
+ False
+ >>> np.allclose([1.0, np.nan], [1.0, np.nan])
+ False
+ >>> np.allclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)
+ True
+
+ """
+ res = all(isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan))
+ return bool(res)
+
+
+def _isclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None):
+ return (a, b)
+
+
+@array_function_dispatch(_isclose_dispatcher)
+def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
+ """
+ Returns a boolean array where two arrays are element-wise equal within a
+ tolerance.
+
+ The tolerance values are positive, typically very small numbers. The
+ relative difference (`rtol` * abs(`b`)) and the absolute difference
+ `atol` are added together to compare against the absolute difference
+ between `a` and `b`.
+
+ .. warning:: The default `atol` is not appropriate for comparing numbers
+ that are much smaller than one (see Notes).
+
+ Parameters
+ ----------
+ a, b : array_like
+ Input arrays to compare.
+ rtol : float
+ The relative tolerance parameter (see Notes).
+ atol : float
+ The absolute tolerance parameter (see Notes).
+ equal_nan : bool
+ Whether to compare NaN's as equal. If True, NaN's in `a` will be
+ considered equal to NaN's in `b` in the output array.
+
+ Returns
+ -------
+ y : array_like
+ Returns a boolean array indicating where `a` and `b` are equal within the
+ given tolerance. If both `a` and `b` are scalars, returns a single
+ boolean value.
+
+ See Also
+ --------
+ allclose
+
+ Notes
+ -----
+ .. versionadded:: 1.7.0
+
+ For finite values, isclose uses the following equation to test whether
+ two floating point values are equivalent.
+
+ absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
+
+ Unlike the built-in `math.isclose`, the above equation is not symmetric
+ in `a` and `b` -- it assumes `b` is the reference value -- so that
+ `isclose(a, b)` might be different from `isclose(b, a)`. Furthermore,
+ the default value of atol is not zero, and is used to determine what
+ small values should be considered close to zero. The default value is
+ appropriate for expected values of order unity: if the expected values
+ are significantly smaller than one, it can result in false positives.
+ `atol` should be carefully selected for the use case at hand. A zero value
+ for `atol` will result in `False` if either `a` or `b` is zero.
+
+ Examples
+ --------
+ >>> np.isclose([1e10,1e-7], [1.00001e10,1e-8])
+ array([True, False])
+ >>> np.isclose([1e10,1e-8], [1.00001e10,1e-9])
+ array([True, True])
+ >>> np.isclose([1e10,1e-8], [1.0001e10,1e-9])
+ array([False, True])
+ >>> np.isclose([1.0, np.nan], [1.0, np.nan])
+ array([True, False])
+ >>> np.isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)
+ array([True, True])
+ >>> np.isclose([1e-8, 1e-7], [0.0, 0.0])
+ array([ True, False], dtype=bool)
+ >>> np.isclose([1e-100, 1e-7], [0.0, 0.0], atol=0.0)
+ array([False, False], dtype=bool)
+ >>> np.isclose([1e-10, 1e-10], [1e-20, 0.0])
+ array([ True, True], dtype=bool)
+ >>> np.isclose([1e-10, 1e-10], [1e-20, 0.999999e-10], atol=0.0)
+ array([False, True], dtype=bool)
+ """
+ def within_tol(x, y, atol, rtol):
+ with errstate(invalid='ignore'):
+ return less_equal(abs(x-y), atol + rtol * abs(y))
+
+ x = asanyarray(a)
+ y = asanyarray(b)
+
+ # Make sure y is an inexact type to avoid bad behavior on abs(MIN_INT).
+ # This will cause casting of x later. Also, make sure to allow subclasses
+ # (e.g., for numpy.ma).
+ dt = multiarray.result_type(y, 1.)
+ y = array(y, dtype=dt, copy=False, subok=True)
+
+ xfin = isfinite(x)
+ yfin = isfinite(y)
+ if all(xfin) and all(yfin):
+ return within_tol(x, y, atol, rtol)
+ else:
+ finite = xfin & yfin
+ cond = zeros_like(finite, subok=True)
+ # Because we're using boolean indexing, x & y must be the same shape.
+ # Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
+ # lib.stride_tricks, though, so we can't import it here.
+ x = x * ones_like(cond)
+ y = y * ones_like(cond)
+ # Avoid subtraction with infinite/nan values...
+ cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
+ # Check for equality of infinite values...
+ cond[~finite] = (x[~finite] == y[~finite])
+ if equal_nan:
+ # Make NaN == NaN
+ both_nan = isnan(x) & isnan(y)
+
+ # Needed to treat masked arrays correctly. = True would not work.
+ cond[both_nan] = both_nan[both_nan]
+
+ return cond[()] # Flatten 0d arrays to scalars
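+
+# Illustrative sketch, not part of upstream NumPy: the default atol=1e-8
+# dominates when the inputs themselves are much smaller than one, which is
+# the false-positive trap the warning in the docstring describes:
+#
+#     >>> np.isclose(1e-9, 2e-9)             # 1e-9 <= 1e-8 + 1e-5*2e-9
+#     True
+#     >>> np.isclose(1e-9, 2e-9, atol=0.0)   # relative test alone
+#     False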
+
+
+def _array_equal_dispatcher(a1, a2):
+ return (a1, a2)
+
+
+@array_function_dispatch(_array_equal_dispatcher)
+def array_equal(a1, a2):
+ """
+ True if two arrays have the same shape and elements, False otherwise.
+
+ Parameters
+ ----------
+ a1, a2 : array_like
+ Input arrays.
+
+ Returns
+ -------
+ b : bool
+ Returns True if the arrays are equal.
+
+ See Also
+ --------
+ allclose: Returns True if two arrays are element-wise equal within a
+ tolerance.
+ array_equiv: Returns True if input arrays are shape consistent and all
+ elements equal.
+
+ Examples
+ --------
+ >>> np.array_equal([1, 2], [1, 2])
+ True
+ >>> np.array_equal(np.array([1, 2]), np.array([1, 2]))
+ True
+ >>> np.array_equal([1, 2], [1, 2, 3])
+ False
+ >>> np.array_equal([1, 2], [1, 4])
+ False
+
+ """
+ try:
+ a1, a2 = asarray(a1), asarray(a2)
+ except Exception:
+ return False
+ if a1.shape != a2.shape:
+ return False
+ return bool(asarray(a1 == a2).all())
+
+
+def _array_equiv_dispatcher(a1, a2):
+ return (a1, a2)
+
+
+@array_function_dispatch(_array_equiv_dispatcher)
+def array_equiv(a1, a2):
+ """
+ Returns True if input arrays are shape consistent and all elements equal.
+
+ Shape consistent means they are either the same shape, or one input array
+ can be broadcasted to create the same shape as the other one.
+
+ Parameters
+ ----------
+ a1, a2 : array_like
+ Input arrays.
+
+ Returns
+ -------
+ out : bool
+ True if equivalent, False otherwise.
+
+ Examples
+ --------
+ >>> np.array_equiv([1, 2], [1, 2])
+ True
+ >>> np.array_equiv([1, 2], [1, 3])
+ False
+
+ Showing the shape equivalence:
+
+ >>> np.array_equiv([1, 2], [[1, 2], [1, 2]])
+ True
+ >>> np.array_equiv([1, 2], [[1, 2, 1, 2], [1, 2, 1, 2]])
+ False
+
+ >>> np.array_equiv([1, 2], [[1, 2], [1, 3]])
+ False
+
+ """
+ try:
+ a1, a2 = asarray(a1), asarray(a2)
+ except Exception:
+ return False
+ try:
+ multiarray.broadcast(a1, a2)
+ except Exception:
+ return False
+
+ return bool(asarray(a1 == a2).all())
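+
+# Illustrative sketch, not part of upstream NumPy: array_equal and
+# array_equiv differ exactly on broadcastable-but-differently-shaped inputs:
+#
+#     >>> np.array_equal([1, 2], [[1, 2], [1, 2]])    # shapes (2,) vs (2, 2)
+#     False
+#     >>> np.array_equiv([1, 2], [[1, 2], [1, 2]])    # broadcasts, then compares
+#     True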
+
+
+_errdict = {"ignore": ERR_IGNORE,
+ "warn": ERR_WARN,
+ "raise": ERR_RAISE,
+ "call": ERR_CALL,
+ "print": ERR_PRINT,
+ "log": ERR_LOG}
+
+_errdict_rev = {value: key for key, value in _errdict.items()}
+
+
+@set_module('numpy')
+def seterr(all=None, divide=None, over=None, under=None, invalid=None):
+ """
+ Set how floating-point errors are handled.
+
+ Note that operations on integer scalar types (such as `int16`) are
+ handled like floating point, and are affected by these settings.
+
+ Parameters
+ ----------
+ all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
+ Set treatment for all types of floating-point errors at once:
+
+ - ignore: Take no action when the exception occurs.
+ - warn: Print a `RuntimeWarning` (via the Python `warnings` module).
+ - raise: Raise a `FloatingPointError`.
+ - call: Call a function specified using the `seterrcall` function.
+ - print: Print a warning directly to ``stdout``.
+ - log: Record error in a Log object specified by `seterrcall`.
+
+ The default is not to change the current behavior.
+ divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
+ Treatment for division by zero.
+ over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
+ Treatment for floating-point overflow.
+ under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
+ Treatment for floating-point underflow.
+ invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
+ Treatment for invalid floating-point operation.
+
+ Returns
+ -------
+ old_settings : dict
+ Dictionary containing the old settings.
+
+ See also
+ --------
+ seterrcall : Set a callback function for the 'call' mode.
+ geterr, geterrcall, errstate
+
+ Notes
+ -----
+ The floating-point exceptions are defined in the IEEE 754 standard [1]_:
+
+ - Division by zero: infinite result obtained from finite numbers.
+ - Overflow: result too large to be expressed.
+ - Underflow: result so close to zero that some precision
+ was lost.
+ - Invalid operation: result is not an expressible number, typically
+ indicates that a NaN was produced.
+
+ .. [1] https://en.wikipedia.org/wiki/IEEE_754
+
+ Examples
+ --------
+ >>> old_settings = np.seterr(all='ignore') #seterr to known value
+ >>> np.seterr(over='raise')
+ {'over': 'ignore', 'divide': 'ignore', 'invalid': 'ignore',
+ 'under': 'ignore'}
+ >>> np.seterr(**old_settings) # reset to default
+ {'over': 'raise', 'divide': 'ignore', 'invalid': 'ignore',
+ 'under': 'ignore'}
+
+ >>> np.int16(32000) * np.int16(3)
+ 30464
+ >>> old_settings = np.seterr(all='warn', over='raise')
+ >>> np.int16(32000) * np.int16(3)
+ Traceback (most recent call last):
+ File "", line 1, in
+ FloatingPointError: overflow encountered in short_scalars
+
+ >>> old_settings = np.seterr(all='print')
+ >>> np.geterr()
+ {'over': 'print', 'divide': 'print', 'invalid': 'print', 'under': 'print'}
+ >>> np.int16(32000) * np.int16(3)
+ Warning: overflow encountered in short_scalars
+ 30464
+
+ """
+
+ pyvals = umath.geterrobj()
+ old = geterr()
+
+ if divide is None:
+ divide = all or old['divide']
+ if over is None:
+ over = all or old['over']
+ if under is None:
+ under = all or old['under']
+ if invalid is None:
+ invalid = all or old['invalid']
+
+ maskvalue = ((_errdict[divide] << SHIFT_DIVIDEBYZERO) +
+ (_errdict[over] << SHIFT_OVERFLOW) +
+ (_errdict[under] << SHIFT_UNDERFLOW) +
+ (_errdict[invalid] << SHIFT_INVALID))
+
+ pyvals[1] = maskvalue
+ umath.seterrobj(pyvals)
+ return old
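+
+# Illustrative sketch, not part of upstream NumPy: how the four error modes
+# pack into `maskvalue`, assuming the usual umath constants (ERR_WARN == 1,
+# ERR_RAISE == 2, shifts 0/3/6/9 for divide/over/under/invalid; geterr()
+# below unpacks with mask 7, i.e. 3 bits per field). For example,
+# seterr(all='warn', divide='raise') stores
+#
+#     maskvalue = (2 << 0) + (1 << 3) + (1 << 6) + (1 << 9)  # == 586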
+
+
+@set_module('numpy')
+def geterr():
+ """
+ Get the current way of handling floating-point errors.
+
+ Returns
+ -------
+ res : dict
+ A dictionary with keys "divide", "over", "under", and "invalid",
+ whose values are from the strings "ignore", "print", "log", "warn",
+ "raise", and "call". The keys represent possible floating-point
+ exceptions, and the values define how these exceptions are handled.
+
+ See Also
+ --------
+ geterrcall, seterr, seterrcall
+
+ Notes
+ -----
+ For complete documentation of the types of floating-point exceptions and
+ treatment options, see `seterr`.
+
+ Examples
+ --------
+ >>> np.geterr()
+ {'over': 'warn', 'divide': 'warn', 'invalid': 'warn',
+ 'under': 'ignore'}
+ >>> np.arange(3.) / np.arange(3.)
+ array([ NaN, 1., 1.])
+
+ >>> oldsettings = np.seterr(all='warn', over='raise')
+ >>> np.geterr()
+ {'over': 'raise', 'divide': 'warn', 'invalid': 'warn', 'under': 'warn'}
+ >>> np.arange(3.) / np.arange(3.)
+ __main__:1: RuntimeWarning: invalid value encountered in divide
+ array([ NaN, 1., 1.])
+
+ """
+ maskvalue = umath.geterrobj()[1]
+ mask = 7
+ res = {}
+ val = (maskvalue >> SHIFT_DIVIDEBYZERO) & mask
+ res['divide'] = _errdict_rev[val]
+ val = (maskvalue >> SHIFT_OVERFLOW) & mask
+ res['over'] = _errdict_rev[val]
+ val = (maskvalue >> SHIFT_UNDERFLOW) & mask
+ res['under'] = _errdict_rev[val]
+ val = (maskvalue >> SHIFT_INVALID) & mask
+ res['invalid'] = _errdict_rev[val]
+ return res
+
+
+@set_module('numpy')
+def setbufsize(size):
+ """
+ Set the size of the buffer used in ufuncs.
+
+ Parameters
+ ----------
+ size : int
+ Size of buffer.
+
+ """
+ if size > 10e6:
+ raise ValueError("Buffer size, %s, is too big." % size)
+ if size < 5:
+ raise ValueError("Buffer size, %s, is too small." % size)
+ if size % 16 != 0:
+ raise ValueError("Buffer size, %s, is not a multiple of 16." % size)
+
+ pyvals = umath.geterrobj()
+ old = getbufsize()
+ pyvals[0] = size
+ umath.seterrobj(pyvals)
+ return old
+
+
+@set_module('numpy')
+def getbufsize():
+ """
+ Return the size of the buffer used in ufuncs.
+
+ Returns
+ -------
+ getbufsize : int
+ Size of ufunc buffer in bytes.
+
+ """
+ return umath.geterrobj()[0]
+
+
+@set_module('numpy')
+def seterrcall(func):
+ """
+ Set the floating-point error callback function or log object.
+
+ There are two ways to capture floating-point error messages. The first
+ is to set the error-handler to 'call', using `seterr`. Then, set
+ the function to call using this function.
+
+ The second is to set the error-handler to 'log', using `seterr`.
+ Floating-point errors then trigger a call to the 'write' method of
+ the provided object.
+
+ Parameters
+ ----------
+ func : callable f(err, flag) or object with write method
+ Function to call upon floating-point errors ('call'-mode) or
+ object whose 'write' method is used to log such message ('log'-mode).
+
+ The call function takes two arguments. The first is a string describing
+ the type of error (such as "divide by zero", "overflow", "underflow",
+ or "invalid value"), and the second is the status flag. The flag is a
+ byte, whose four least-significant bits indicate the type of error, one
+ of "divide", "over", "under", "invalid"::
+
+ [0 0 0 0 divide over under invalid]
+
+ In other words, ``flags = divide + 2*over + 4*under + 8*invalid``.
+
+ If an object is provided, its write method should take one argument,
+ a string.
+
+ Returns
+ -------
+ h : callable, log instance or None
+ The old error handler.
+
+ See Also
+ --------
+ seterr, geterr, geterrcall
+
+ Examples
+ --------
+ Callback upon error:
+
+ >>> def err_handler(type, flag):
+ ... print("Floating point error (%s), with flag %s" % (type, flag))
+ ...
+
+ >>> saved_handler = np.seterrcall(err_handler)
+ >>> save_err = np.seterr(all='call')
+
+ >>> np.array([1, 2, 3]) / 0.0
+ Floating point error (divide by zero), with flag 1
+ array([ Inf, Inf, Inf])
+
+ >>> np.seterrcall(saved_handler)
+
+ >>> np.seterr(**save_err)
+ {'over': 'call', 'divide': 'call', 'invalid': 'call', 'under': 'call'}
+
+ Log error message:
+
+ >>> class Log(object):
+ ... def write(self, msg):
+ ... print("LOG: %s" % msg)
+ ...
+
+ >>> log = Log()
+ >>> saved_handler = np.seterrcall(log)
+ >>> save_err = np.seterr(all='log')
+
+ >>> np.array([1, 2, 3]) / 0.0
+ LOG: Warning: divide by zero encountered in divide
+
+ array([ Inf, Inf, Inf])
+
+ >>> np.seterrcall(saved_handler)
+ <__main__.Log object at 0x...>
+ >>> np.seterr(**save_err)
+ {'over': 'log', 'divide': 'log', 'invalid': 'log', 'under': 'log'}
+
+ """
+ if func is not None and not isinstance(func, collections_abc.Callable):
+ if not hasattr(func, 'write') or not isinstance(func.write, collections_abc.Callable):
+ raise ValueError("Only callable can be used as callback")
+ pyvals = umath.geterrobj()
+ old = geterrcall()
+ pyvals[2] = func
+ umath.seterrobj(pyvals)
+ return old
+
+
+@set_module('numpy')
+def geterrcall():
+ """
+ Return the current callback function used on floating-point errors.
+
+ When the error handling for a floating-point error (one of "divide",
+ "over", "under", or "invalid") is set to 'call' or 'log', the function
+ that is called or the log instance that is written to is returned by
+ `geterrcall`. This function or log instance has been set with
+ `seterrcall`.
+
+ Returns
+ -------
+ errobj : callable, log instance or None
+ The current error handler. If no handler was set through `seterrcall`,
+ ``None`` is returned.
+
+ See Also
+ --------
+ seterrcall, seterr, geterr
+
+ Notes
+ -----
+ For complete documentation of the types of floating-point exceptions and
+ treatment options, see `seterr`.
+
+ Examples
+ --------
+ >>> np.geterrcall() # we did not yet set a handler, returns None
+
+ >>> oldsettings = np.seterr(all='call')
+ >>> def err_handler(type, flag):
+ ... print("Floating point error (%s), with flag %s" % (type, flag))
+ >>> oldhandler = np.seterrcall(err_handler)
+ >>> np.array([1, 2, 3]) / 0.0
+ Floating point error (divide by zero), with flag 1
+ array([ Inf, Inf, Inf])
+
+ >>> cur_handler = np.geterrcall()
+ >>> cur_handler is err_handler
+ True
+
+ """
+ return umath.geterrobj()[2]
+
+
+class _unspecified(object):
+ pass
+
+
+_Unspecified = _unspecified()
+
+
+@set_module('numpy')
+class errstate(object):
+ """
+ errstate(**kwargs)
+
+ Context manager for floating-point error handling.
+
+ Using an instance of `errstate` as a context manager allows statements in
+ that context to execute with a known error handling behavior. Upon entering
+ the context the error handling is set with `seterr` and `seterrcall`, and
+ upon exiting it is reset to what it was before.
+
+ Parameters
+ ----------
+ kwargs : {divide, over, under, invalid}
+ Keyword arguments. The valid keywords are the possible floating-point
+ exceptions. Each keyword should have a string value that defines the
+ treatment for the particular error. Possible values are
+ {'ignore', 'warn', 'raise', 'call', 'print', 'log'}.
+
+ See Also
+ --------
+ seterr, geterr, seterrcall, geterrcall
+
+ Notes
+ -----
+ For complete documentation of the types of floating-point exceptions and
+ treatment options, see `seterr`.
+
+ Examples
+ --------
+ >>> olderr = np.seterr(all='ignore') # Set error handling to known state.
+
+ >>> np.arange(3) / 0.
+ array([ NaN, Inf, Inf])
+ >>> with np.errstate(divide='warn'):
+ ... np.arange(3) / 0.
+ ...
+ __main__:2: RuntimeWarning: divide by zero encountered in divide
+ array([ NaN, Inf, Inf])
+
+ >>> np.sqrt(-1)
+ nan
+ >>> with np.errstate(invalid='raise'):
+ ... np.sqrt(-1)
+ Traceback (most recent call last):
+ File "", line 2, in
+ FloatingPointError: invalid value encountered in sqrt
+
+ Outside the context the error handling behavior has not changed:
+
+ >>> np.geterr()
+ {'over': 'warn', 'divide': 'warn', 'invalid': 'warn',
+ 'under': 'ignore'}
+
+ """
+ # Note that we don't want to run the above doctests because they will fail
+ # without a from __future__ import with_statement
+
+ def __init__(self, **kwargs):
+ self.call = kwargs.pop('call', _Unspecified)
+ self.kwargs = kwargs
+
+ def __enter__(self):
+ self.oldstate = seterr(**self.kwargs)
+ if self.call is not _Unspecified:
+ self.oldcall = seterrcall(self.call)
+
+ def __exit__(self, *exc_info):
+ seterr(**self.oldstate)
+ if self.call is not _Unspecified:
+ seterrcall(self.oldcall)
+
+
+def _setdef():
+ defval = [UFUNC_BUFSIZE_DEFAULT, ERR_DEFAULT, None]
+ umath.seterrobj(defval)
+
+
+# set the default values
+_setdef()
+
+Inf = inf = infty = Infinity = PINF
+nan = NaN = NAN
+False_ = bool_(False)
+True_ = bool_(True)
+
+
+def extend_all(module):
+ existing = set(__all__)
+ mall = getattr(module, '__all__')
+ for a in mall:
+ if a not in existing:
+ __all__.append(a)
+
+
+from .umath import *
+from .numerictypes import *
+from . import fromnumeric
+from .fromnumeric import *
+from . import arrayprint
+from .arrayprint import *
+extend_all(fromnumeric)
+extend_all(umath)
+extend_all(numerictypes)
+extend_all(arrayprint)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/numeric.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/numeric.pyc
new file mode 100644
index 0000000..51516e5
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/numeric.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/numerictypes.py b/project/venv/lib/python2.7/site-packages/numpy/core/numerictypes.py
new file mode 100644
index 0000000..f00f922
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/numerictypes.py
@@ -0,0 +1,675 @@
+"""
+numerictypes: Define the numeric type objects
+
+This module is designed so "from numerictypes import \\*" is safe.
+Exported symbols include:
+
+ Dictionary with all registered number types (including aliases):
+ typeDict
+
+ Type objects (not all will be available, depends on platform):
+ see variable sctypes for which ones you have
+
+ Bit-width names
+
+ int8 int16 int32 int64 int128
+ uint8 uint16 uint32 uint64 uint128
+ float16 float32 float64 float96 float128 float256
+ complex32 complex64 complex128 complex192 complex256 complex512
+ datetime64 timedelta64
+
+ c-based names
+
+ bool_
+
+ object_
+
+ void, str_, unicode_
+
+ byte, ubyte,
+ short, ushort
+ intc, uintc,
+ intp, uintp,
+ int_, uint,
+ longlong, ulonglong,
+
+ single, csingle,
+ float_, complex_,
+ longfloat, clongfloat,
+
+ As part of the type-hierarchy: xx -- is bit-width
+
+ generic
+ +-> bool_ (kind=b)
+ +-> number
+ | +-> integer
+ | | +-> signedinteger (intxx) (kind=i)
+ | | | byte
+ | | | short
+ | | | intc
+ | | | intp int0
+ | | | int_
+ | | | longlong
+ | | \\-> unsignedinteger (uintxx) (kind=u)
+ | | ubyte
+ | | ushort
+ | | uintc
+ | | uintp uint0
+ | | uint_
+ | | ulonglong
+ | +-> inexact
+ | +-> floating (floatxx) (kind=f)
+ | | half
+ | | single
+ | | float_ (double)
+ | | longfloat
+ | \\-> complexfloating (complexxx) (kind=c)
+ | csingle (singlecomplex)
+ | complex_ (cfloat, cdouble)
+ | clongfloat (longcomplex)
+ +-> flexible
+ | +-> character
+ | | str_ (string_, bytes_) (kind=S) [Python 2]
+ | | unicode_ (kind=U) [Python 2]
+ | |
+ | | bytes_ (string_) (kind=S) [Python 3]
+ | | str_ (unicode_) (kind=U) [Python 3]
+ | |
+ | \\-> void (kind=V)
+ \\-> object_ (not used much) (kind=O)
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import types as _types
+import sys
+import numbers
+import warnings
+
+from numpy.compat import bytes, long
+from numpy.core.multiarray import (
+ typeinfo, ndarray, array, empty, dtype, datetime_data,
+ datetime_as_string, busday_offset, busday_count, is_busday,
+ busdaycalendar
+ )
+from numpy.core.overrides import set_module
+
+# we add more at the bottom
+__all__ = ['sctypeDict', 'sctypeNA', 'typeDict', 'typeNA', 'sctypes',
+ 'ScalarType', 'obj2sctype', 'cast', 'nbytes', 'sctype2char',
+ 'maximum_sctype', 'issctype', 'typecodes', 'find_common_type',
+ 'issubdtype', 'datetime_data', 'datetime_as_string',
+ 'busday_offset', 'busday_count', 'is_busday', 'busdaycalendar',
+ ]
+
+# we don't need all these imports, but we need to keep them for compatibility
+# for users using np.core.numerictypes.UPPER_TABLE
+from ._string_helpers import (
+ english_lower, english_upper, english_capitalize, LOWER_TABLE, UPPER_TABLE
+)
+
+from ._type_aliases import (
+ sctypeDict,
+ sctypeNA,
+ allTypes,
+ bitname,
+ sctypes,
+ _concrete_types,
+ _concrete_typeinfo,
+ _bits_of,
+)
+from ._dtype import _kind_name
+
+# we don't export these for import *, but we do want them accessible
+# as numerictypes.bool, etc.
+if sys.version_info[0] >= 3:
+ from builtins import bool, int, float, complex, object, str
+ unicode = str
+else:
+ from __builtin__ import bool, int, float, complex, object, unicode, str
+
+
+# We use this later
+generic = allTypes['generic']
+
+genericTypeRank = ['bool', 'int8', 'uint8', 'int16', 'uint16',
+ 'int32', 'uint32', 'int64', 'uint64', 'int128',
+ 'uint128', 'float16',
+ 'float32', 'float64', 'float80', 'float96', 'float128',
+ 'float256',
+ 'complex32', 'complex64', 'complex128', 'complex160',
+ 'complex192', 'complex256', 'complex512', 'object']
+
+def maximum_sctype(t):
+ """
+ Return the scalar type of highest precision of the same kind as the input.
+
+ Parameters
+ ----------
+ t : dtype or dtype specifier
+ The input data type. This can be a `dtype` object or an object that
+ is convertible to a `dtype`.
+
+ Returns
+ -------
+ out : dtype
+ The highest precision data type of the same kind (`dtype.kind`) as `t`.
+
+ See Also
+ --------
+ obj2sctype, mintypecode, sctype2char
+ dtype
+
+ Examples
+ --------
+    >>> np.maximum_sctype(int)
+    <type 'numpy.int64'>
+    >>> np.maximum_sctype(np.uint8)
+    <type 'numpy.uint64'>
+    >>> np.maximum_sctype(complex)
+    <type 'numpy.complex256'>
+
+    >>> np.maximum_sctype(str)
+    <type 'numpy.string_'>
+
+    >>> np.maximum_sctype('i2')
+    <type 'numpy.int64'>
+    >>> np.maximum_sctype('f4')
+    <type 'numpy.float128'>
+
+ """
+ g = obj2sctype(t)
+ if g is None:
+ return t
+ t = g
+ base = _kind_name(dtype(t))
+ if base in sctypes:
+ return sctypes[base][-1]
+ else:
+ return t
+
+
+@set_module('numpy')
+def issctype(rep):
+ """
+ Determines whether the given object represents a scalar data-type.
+
+ Parameters
+ ----------
+ rep : any
+ If `rep` is an instance of a scalar dtype, True is returned. If not,
+ False is returned.
+
+ Returns
+ -------
+ out : bool
+ Boolean result of check whether `rep` is a scalar dtype.
+
+ See Also
+ --------
+ issubsctype, issubdtype, obj2sctype, sctype2char
+
+ Examples
+ --------
+ >>> np.issctype(np.int32)
+ True
+ >>> np.issctype(list)
+ False
+ >>> np.issctype(1.1)
+ False
+
+ Strings are also a scalar type:
+
+ >>> np.issctype(np.dtype('str'))
+ True
+
+ """
+ if not isinstance(rep, (type, dtype)):
+ return False
+ try:
+ res = obj2sctype(rep)
+ if res and res != object_:
+ return True
+ return False
+ except Exception:
+ return False
+
+
+@set_module('numpy')
+def obj2sctype(rep, default=None):
+ """
+ Return the scalar dtype or NumPy equivalent of Python type of an object.
+
+ Parameters
+ ----------
+ rep : any
+ The object of which the type is returned.
+ default : any, optional
+ If given, this is returned for objects whose types can not be
+ determined. If not given, None is returned for those objects.
+
+ Returns
+ -------
+ dtype : dtype or Python type
+ The data type of `rep`.
+
+ See Also
+ --------
+ sctype2char, issctype, issubsctype, issubdtype, maximum_sctype
+
+ Examples
+ --------
+    >>> np.obj2sctype(np.int32)
+    <type 'numpy.int32'>
+    >>> np.obj2sctype(np.array([1., 2.]))
+    <type 'numpy.float64'>
+    >>> np.obj2sctype(np.array([1.j]))
+    <type 'numpy.complex128'>
+
+    >>> np.obj2sctype(dict)
+    <type 'numpy.object_'>
+    >>> np.obj2sctype('string')
+    <type 'numpy.string_'>
+
+    >>> np.obj2sctype(1, default=list)
+    <type 'list'>
+
+ """
+    # prevent abstract classes being upcast
+ if isinstance(rep, type) and issubclass(rep, generic):
+ return rep
+ # extract dtype from arrays
+ if isinstance(rep, ndarray):
+ return rep.dtype.type
+ # fall back on dtype to convert
+ try:
+ res = dtype(rep)
+ except Exception:
+ return default
+ else:
+ return res.type
+
+
+@set_module('numpy')
+def issubclass_(arg1, arg2):
+ """
+ Determine if a class is a subclass of a second class.
+
+ `issubclass_` is equivalent to the Python built-in ``issubclass``,
+ except that it returns False instead of raising a TypeError if one
+ of the arguments is not a class.
+
+ Parameters
+ ----------
+ arg1 : class
+ Input class. True is returned if `arg1` is a subclass of `arg2`.
+ arg2 : class or tuple of classes.
+ Input class. If a tuple of classes, True is returned if `arg1` is a
+ subclass of any of the tuple elements.
+
+ Returns
+ -------
+ out : bool
+ Whether `arg1` is a subclass of `arg2` or not.
+
+ See Also
+ --------
+ issubsctype, issubdtype, issctype
+
+ Examples
+ --------
+ >>> np.issubclass_(np.int32, int)
+ True
+ >>> np.issubclass_(np.int32, float)
+ False
+
+ """
+ try:
+ return issubclass(arg1, arg2)
+ except TypeError:
+ return False
+
+
+@set_module('numpy')
+def issubsctype(arg1, arg2):
+ """
+ Determine if the first argument is a subclass of the second argument.
+
+ Parameters
+ ----------
+ arg1, arg2 : dtype or dtype specifier
+ Data-types.
+
+ Returns
+ -------
+ out : bool
+ The result.
+
+ See Also
+ --------
+    issctype, issubdtype, obj2sctype
+
+ Examples
+ --------
+ >>> np.issubsctype('S8', str)
+ True
+ >>> np.issubsctype(np.array([1]), int)
+ True
+ >>> np.issubsctype(np.array([1]), float)
+ False
+
+ """
+ return issubclass(obj2sctype(arg1), obj2sctype(arg2))
+
+
+@set_module('numpy')
+def issubdtype(arg1, arg2):
+ """
+    Returns True if the first argument is a typecode lower/equal in the type hierarchy.
+
+ Parameters
+ ----------
+ arg1, arg2 : dtype_like
+ dtype or string representing a typecode.
+
+ Returns
+ -------
+ out : bool
+
+ See Also
+ --------
+ issubsctype, issubclass_
+ numpy.core.numerictypes : Overview of numpy type hierarchy.
+
+ Examples
+ --------
+ >>> np.issubdtype('S1', np.string_)
+ True
+ >>> np.issubdtype(np.float64, np.float32)
+ False
+
+ """
+ if not issubclass_(arg1, generic):
+ arg1 = dtype(arg1).type
+ if not issubclass_(arg2, generic):
+ arg2_orig = arg2
+ arg2 = dtype(arg2).type
+ if not isinstance(arg2_orig, dtype):
+ # weird deprecated behaviour, that tried to infer np.floating from
+ # float, and similar less obvious things, such as np.generic from
+ # basestring
+ mro = arg2.mro()
+ arg2 = mro[1] if len(mro) > 1 else mro[0]
+
+ def type_repr(x):
+ """ Helper to produce clear error messages """
+ if not isinstance(x, type):
+ return repr(x)
+ elif issubclass(x, generic):
+ return "np.{}".format(x.__name__)
+ else:
+ return x.__name__
+
+ # 1.14, 2017-08-01
+ warnings.warn(
+ "Conversion of the second argument of issubdtype from `{raw}` "
+ "to `{abstract}` is deprecated. In future, it will be treated "
+ "as `{concrete} == np.dtype({raw}).type`.".format(
+ raw=type_repr(arg2_orig),
+ abstract=type_repr(arg2),
+ concrete=type_repr(dtype(arg2_orig).type)
+ ),
+ FutureWarning, stacklevel=2
+ )
+
+ return issubclass(arg1, arg2)
+
+
+# This dictionary allows look up based on any alias for an array data-type
+class _typedict(dict):
+ """
+ Base object for a dictionary for look-up with any alias for an array dtype.
+
+    Instances of `_typedict` cannot be used as dictionaries directly;
+    first they have to be populated.
+
+ """
+
+ def __getitem__(self, obj):
+ return dict.__getitem__(self, obj2sctype(obj))
+
+nbytes = _typedict()
+_alignment = _typedict()
+_maxvals = _typedict()
+_minvals = _typedict()
+def _construct_lookups():
+ for name, info in _concrete_typeinfo.items():
+ obj = info.type
+ nbytes[obj] = info.bits // 8
+ _alignment[obj] = info.alignment
+ if len(info) > 5:
+ _maxvals[obj] = info.max
+ _minvals[obj] = info.min
+ else:
+ _maxvals[obj] = None
+ _minvals[obj] = None
+
+_construct_lookups()
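+
+# Illustrative sketch, not part of upstream NumPy: because _typedict keys
+# through obj2sctype, any alias of a scalar type reaches the same entry:
+#
+#     >>> np.nbytes[np.float64]
+#     8
+#     >>> np.nbytes['f8']      # string alias resolves to the same type
+#     8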
+
+
+@set_module('numpy')
+def sctype2char(sctype):
+ """
+ Return the string representation of a scalar dtype.
+
+ Parameters
+ ----------
+ sctype : scalar dtype or object
+ If a scalar dtype, the corresponding string character is
+ returned. If an object, `sctype2char` tries to infer its scalar type
+ and then return the corresponding string character.
+
+ Returns
+ -------
+ typechar : str
+ The string character corresponding to the scalar type.
+
+ Raises
+ ------
+ ValueError
+ If `sctype` is an object for which the type can not be inferred.
+
+ See Also
+ --------
+ obj2sctype, issctype, issubsctype, mintypecode
+
+ Examples
+ --------
+ >>> for sctype in [np.int32, float, complex, np.string_, np.ndarray]:
+ ... print(np.sctype2char(sctype))
+ l
+ d
+ D
+ S
+ O
+
+ >>> x = np.array([1., 2-1.j])
+ >>> np.sctype2char(x)
+ 'D'
+ >>> np.sctype2char(list)
+ 'O'
+
+ """
+ sctype = obj2sctype(sctype)
+ if sctype is None:
+ raise ValueError("unrecognized type")
+ if sctype not in _concrete_types:
+ # for compatibility
+ raise KeyError(sctype)
+ return dtype(sctype).char
+
+# Create dictionary of casting functions that wrap sequences
+# indexed by type or type character
+cast = _typedict()
+for key in _concrete_types:
+ cast[key] = lambda x, k=key: array(x, copy=False).astype(k)
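+
+# Illustrative sketch, not part of upstream NumPy: each entry wraps astype,
+# so values can be converted by indexing with any alias of the target type:
+#
+#     >>> np.cast[np.int32]([1.6, 2.2])   # astype truncates toward zero
+#     array([1, 2], dtype=int32)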
+
+try:
+ ScalarType = [_types.IntType, _types.FloatType, _types.ComplexType,
+ _types.LongType, _types.BooleanType,
+ _types.StringType, _types.UnicodeType, _types.BufferType]
+except AttributeError:
+ # Py3K
+ ScalarType = [int, float, complex, int, bool, bytes, str, memoryview]
+
+ScalarType.extend(_concrete_types)
+ScalarType = tuple(ScalarType)
+
+
+# Now add the types we've determined to this module
+for key in allTypes:
+ globals()[key] = allTypes[key]
+ __all__.append(key)
+
+del key
+
+typecodes = {'Character':'c',
+ 'Integer':'bhilqp',
+ 'UnsignedInteger':'BHILQP',
+ 'Float':'efdg',
+ 'Complex':'FDG',
+ 'AllInteger':'bBhHiIlLqQpP',
+ 'AllFloat':'efdgFDG',
+ 'Datetime': 'Mm',
+ 'All':'?bhilqpBHILQPefdgFDGSUVOMm'}
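+
+# Illustrative sketch, not part of upstream NumPy: the typecodes table
+# supports kind tests by character membership:
+#
+#     >>> np.dtype(np.int16).char in np.typecodes['AllInteger']
+#     True
+#     >>> np.dtype(np.float32).char in np.typecodes['AllInteger']
+#     False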
+
+# backwards compatibility --- deprecated name
+typeDict = sctypeDict
+typeNA = sctypeNA
+
+# b -> boolean
+# u -> unsigned integer
+# i -> signed integer
+# f -> floating point
+# c -> complex
+# M -> datetime
+# m -> timedelta
+# S -> string
+# U -> Unicode string
+# V -> record
+# O -> Python object
+_kind_list = ['b', 'u', 'i', 'f', 'c', 'S', 'U', 'V', 'O', 'M', 'm']
+
+__test_types = '?'+typecodes['AllInteger'][:-2]+typecodes['AllFloat']+'O'
+__len_test_types = len(__test_types)
+
+# Keep incrementing until a common type both can be coerced to
+# is found. Otherwise, return None
+def _find_common_coerce(a, b):
+ if a > b:
+ return a
+ try:
+ thisind = __test_types.index(a.char)
+ except ValueError:
+ return None
+ return _can_coerce_all([a, b], start=thisind)
+
+# Find a data-type that all data-types in a list can be coerced to
+def _can_coerce_all(dtypelist, start=0):
+ N = len(dtypelist)
+ if N == 0:
+ return None
+ if N == 1:
+ return dtypelist[0]
+ thisind = start
+ while thisind < __len_test_types:
+ newdtype = dtype(__test_types[thisind])
+ numcoerce = len([x for x in dtypelist if newdtype >= x])
+ if numcoerce == N:
+ return newdtype
+ thisind += 1
+ return None
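+
+# Illustrative walk, not part of upstream NumPy (assuming dtype comparisons
+# such as `newdtype >= x` mirror safe casting): for [int32, float32] the
+# scan tries '?', 'b', 'B', ... and stops at 'd' (float64), the first test
+# type both inputs cast to safely -- float32's 24-bit mantissa cannot hold
+# every int32 exactly:
+#
+#     >>> np.find_common_type([np.int32, np.float32], [])
+#     dtype('float64')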
+
+def _register_types():
+ numbers.Integral.register(integer)
+ numbers.Complex.register(inexact)
+ numbers.Real.register(floating)
+ numbers.Number.register(number)
+
+_register_types()
+
+
+@set_module('numpy')
+def find_common_type(array_types, scalar_types):
+ """
+ Determine common type following standard coercion rules.
+
+ Parameters
+ ----------
+ array_types : sequence
+ A list of dtypes or dtype convertible objects representing arrays.
+ scalar_types : sequence
+ A list of dtypes or dtype convertible objects representing scalars.
+
+ Returns
+ -------
+ datatype : dtype
+ The common data type, which is the maximum of `array_types` ignoring
+ `scalar_types`, unless the maximum of `scalar_types` is of a
+ different kind (`dtype.kind`). If the kind is not understood, then
+ None is returned.
+
+ See Also
+ --------
+ dtype, common_type, can_cast, mintypecode
+
+ Examples
+ --------
+ >>> np.find_common_type([], [np.int64, np.float32, complex])
+ dtype('complex128')
+ >>> np.find_common_type([np.int64, np.float32], [])
+ dtype('float64')
+
+ The standard casting rules ensure that a scalar cannot up-cast an
+ array unless the scalar is of a fundamentally different kind of data
+    (i.e. under a different hierarchy in the data-type hierarchy) than
+    the array:
+
+ >>> np.find_common_type([np.float32], [np.int64, np.float64])
+ dtype('float32')
+
+ Complex is of a different type, so it up-casts the float in the
+ `array_types` argument:
+
+ >>> np.find_common_type([np.float32], [complex])
+ dtype('complex128')
+
+ Type specifier strings are convertible to dtypes and can therefore
+ be used instead of dtypes:
+
+ >>> np.find_common_type(['f4', 'f4', 'i4'], ['c8'])
+ dtype('complex128')
+
+ """
+ array_types = [dtype(x) for x in array_types]
+ scalar_types = [dtype(x) for x in scalar_types]
+
+ maxa = _can_coerce_all(array_types)
+ maxsc = _can_coerce_all(scalar_types)
+
+ if maxa is None:
+ return maxsc
+
+ if maxsc is None:
+ return maxa
+
+ try:
+ index_a = _kind_list.index(maxa.kind)
+ index_sc = _kind_list.index(maxsc.kind)
+ except ValueError:
+ return None
+
+ if index_sc > index_a:
+ return _find_common_coerce(maxsc, maxa)
+ else:
+ return maxa
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/numerictypes.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/numerictypes.pyc
new file mode 100644
index 0000000..18638de
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/numerictypes.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/overrides.py b/project/venv/lib/python2.7/site-packages/numpy/core/overrides.py
new file mode 100644
index 0000000..c55174e
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/overrides.py
@@ -0,0 +1,187 @@
+"""Implementation of __array_function__ overrides from NEP-18."""
+import collections
+import functools
+import os
+
+from numpy.core._multiarray_umath import (
+ add_docstring, implement_array_function, _get_implementing_args)
+from numpy.compat._inspect import getargspec
+
+
+ENABLE_ARRAY_FUNCTION = bool(
+ int(os.environ.get('NUMPY_EXPERIMENTAL_ARRAY_FUNCTION', 0)))
+
+
+add_docstring(
+ implement_array_function,
+ """
+ Implement a function with checks for __array_function__ overrides.
+
+ All arguments are required, and can only be passed by position.
+
+ Arguments
+ ---------
+ implementation : function
+ Function that implements the operation on NumPy array without
+ overrides when called like ``implementation(*args, **kwargs)``.
+ public_api : function
+ Function exposed by NumPy's public API originally called like
+ ``public_api(*args, **kwargs)`` on which arguments are now being
+ checked.
+ relevant_args : iterable
+ Iterable of arguments to check for __array_function__ methods.
+ args : tuple
+ Arbitrary positional arguments originally passed into ``public_api``.
+ kwargs : dict
+ Arbitrary keyword arguments originally passed into ``public_api``.
+
+ Returns
+ -------
+ Result from calling ``implementation()`` or an ``__array_function__``
+ method, as appropriate.
+
+ Raises
+ ------
+ TypeError : if no implementation is found.
+ """)
+
+
+# exposed for testing purposes; used internally by implement_array_function
+add_docstring(
+ _get_implementing_args,
+ """
+ Collect arguments on which to call __array_function__.
+
+ Parameters
+ ----------
+ relevant_args : iterable of array-like
+ Iterable of possibly array-like arguments to check for
+ __array_function__ methods.
+
+ Returns
+ -------
+ Sequence of arguments with __array_function__ methods, in the order in
+ which they should be called.
+ """)
+
+
+ArgSpec = collections.namedtuple('ArgSpec', 'args varargs keywords defaults')
+
+
+def verify_matching_signatures(implementation, dispatcher):
+ """Verify that a dispatcher function has the right signature."""
+ implementation_spec = ArgSpec(*getargspec(implementation))
+ dispatcher_spec = ArgSpec(*getargspec(dispatcher))
+
+ if (implementation_spec.args != dispatcher_spec.args or
+ implementation_spec.varargs != dispatcher_spec.varargs or
+ implementation_spec.keywords != dispatcher_spec.keywords or
+ (bool(implementation_spec.defaults) !=
+ bool(dispatcher_spec.defaults)) or
+ (implementation_spec.defaults is not None and
+ len(implementation_spec.defaults) !=
+ len(dispatcher_spec.defaults))):
+ raise RuntimeError('implementation and dispatcher for %s have '
+ 'different function signatures' % implementation)
+
+ if implementation_spec.defaults is not None:
+ if dispatcher_spec.defaults != (None,) * len(dispatcher_spec.defaults):
+ raise RuntimeError('dispatcher functions can only use None for '
+ 'default argument values')
+
+
+def set_module(module):
+ """Decorator for overriding __module__ on a function or class.
+
+ Example usage::
+
+ @set_module('numpy')
+ def example():
+ pass
+
+ assert example.__module__ == 'numpy'
+ """
+ def decorator(func):
+ if module is not None:
+ func.__module__ = module
+ return func
+ return decorator
+
+
+def array_function_dispatch(dispatcher, module=None, verify=True,
+ docs_from_dispatcher=False):
+ """Decorator for adding dispatch with the __array_function__ protocol.
+
+ See NEP-18 for example usage.
+
+ Parameters
+ ----------
+ dispatcher : callable
+ Function that when called like ``dispatcher(*args, **kwargs)`` with
+ arguments from the NumPy function call returns an iterable of
+ array-like arguments to check for ``__array_function__``.
+ module : str, optional
+ __module__ attribute to set on new function, e.g., ``module='numpy'``.
+ By default, module is copied from the decorated function.
+ verify : bool, optional
+        If True, verify that the signatures of the dispatcher and decorated
+        function match exactly: all required and optional arguments
+ should appear in order with the same names, but the default values for
+ all optional arguments should be ``None``. Only disable verification
+ if the dispatcher's signature needs to deviate for some particular
+ reason, e.g., because the function has a signature like
+ ``func(*args, **kwargs)``.
+ docs_from_dispatcher : bool, optional
+ If True, copy docs from the dispatcher function onto the dispatched
+ function, rather than from the implementation. This is useful for
+ functions defined in C, which otherwise don't have docstrings.
+
+ Returns
+ -------
+ Function suitable for decorating the implementation of a NumPy function.
+ """
+
+ if not ENABLE_ARRAY_FUNCTION:
+ # __array_function__ requires an explicit opt-in for now
+ def decorator(implementation):
+ if module is not None:
+ implementation.__module__ = module
+ if docs_from_dispatcher:
+ add_docstring(implementation, dispatcher.__doc__)
+ return implementation
+ return decorator
+
+ def decorator(implementation):
+ if verify:
+ verify_matching_signatures(implementation, dispatcher)
+
+ if docs_from_dispatcher:
+ add_docstring(implementation, dispatcher.__doc__)
+
+ @functools.wraps(implementation)
+ def public_api(*args, **kwargs):
+ relevant_args = dispatcher(*args, **kwargs)
+ return implement_array_function(
+ implementation, public_api, relevant_args, args, kwargs)
+
+ if module is not None:
+ public_api.__module__ = module
+
+ # TODO: remove this when we drop Python 2 support (functools.wraps
+ # adds __wrapped__ automatically in later versions)
+ public_api.__wrapped__ = implementation
+
+ return public_api
+
+ return decorator
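+
+# Illustrative sketch with a hypothetical `stack_pair`, not a NumPy API:
+# opting a new public function into the protocol. The dispatcher mirrors
+# the signature, uses None defaults, and returns the arguments to scan for
+# __array_function__; in this build the protocol is only active when
+# NUMPY_EXPERIMENTAL_ARRAY_FUNCTION=1 is set in the environment.
+#
+#     import numpy as np
+#
+#     def _stack_pair_dispatcher(a, b, axis=None):
+#         return (a, b)
+#
+#     @array_function_dispatch(_stack_pair_dispatcher)
+#     def stack_pair(a, b, axis=0):
+#         # stack two arrays along a new axis
+#         return np.stack([a, b], axis=axis)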
+
+
+def array_function_from_dispatcher(
+ implementation, module=None, verify=True, docs_from_dispatcher=True):
+ """Like array_function_dispatcher, but with function arguments flipped."""
+
+ def decorator(dispatcher):
+ return array_function_dispatch(
+ dispatcher, module, verify=verify,
+ docs_from_dispatcher=docs_from_dispatcher)(implementation)
+ return decorator
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/overrides.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/overrides.pyc
new file mode 100644
index 0000000..2f836f9
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/overrides.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/records.py b/project/venv/lib/python2.7/site-packages/numpy/core/records.py
new file mode 100644
index 0000000..5898bb1
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/records.py
@@ -0,0 +1,879 @@
+"""
+Record Arrays
+=============
+Record arrays expose the fields of structured arrays as properties.
+
+Most commonly, ndarrays contain elements of a single type, e.g. floats,
+integers, bools etc. However, it is possible for elements to be combinations
+of these using structured types, such as::
+
+ >>> a = np.array([(1, 2.0), (1, 2.0)], dtype=[('x', int), ('y', float)])
+ >>> a
+ array([(1, 2.0), (1, 2.0)],
+        dtype=[('x', '<i8'), ('y', '<f8')])
+
+Here, each element consists of two fields: x (an int), and y (a float).
+This is known as a structured array. The different fields are analogous
+to columns in a spread-sheet. The different fields can be accessed as
+one would a dictionary::
+
+  >>> a['x']
+ array([1, 1])
+
+ >>> a['y']
+ array([ 2., 2.])
+
+Record arrays allow us to access fields as properties::
+
+ >>> ar = np.rec.array(a)
+
+ >>> ar.x
+ array([1, 1])
+
+ >>> ar.y
+ array([ 2., 2.])
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import sys
+import os
+import warnings
+
+from . import numeric as sb
+from . import numerictypes as nt
+from numpy.compat import isfileobj, bytes, long, unicode, os_fspath
+from numpy.core.overrides import set_module
+from .arrayprint import get_printoptions
+
+# All of the functions allow formats to be a dtype
+__all__ = ['record', 'recarray', 'format_parser']
+
+
+ndarray = sb.ndarray
+
+_byteorderconv = {'b':'>',
+ 'l':'<',
+ 'n':'=',
+ 'B':'>',
+ 'L':'<',
+ 'N':'=',
+ 'S':'s',
+ 's':'s',
+ '>':'>',
+ '<':'<',
+ '=':'=',
+ '|':'|',
+ 'I':'|',
+ 'i':'|'}
+
+# formats regular expression
+# allows a multidimensional spec with a tuple syntax in front
+# of the letter code: '(2,3)f4' and ' ( 2 , 3 ) f4 ' are
+# equally allowed
+
+numfmt = nt.typeDict
+
+def find_duplicate(list):
+ """Find duplication in a list, return a list of duplicated elements"""
+ dup = []
+ for i in range(len(list)):
+ if (list[i] in list[i + 1:]):
+ if (list[i] not in dup):
+ dup.append(list[i])
+ return dup
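+
+# Illustrative sketch, not part of upstream NumPy: duplicates are reported
+# once each, in order of first appearance:
+#
+#     >>> find_duplicate(['x', 'y', 'x', 'z', 'y'])
+#     ['x', 'y']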
+
+
+@set_module('numpy')
+class format_parser(object):
+ """
+ Class to convert formats, names, titles description to a dtype.
+
+ After constructing the format_parser object, the dtype attribute is
+ the converted data-type:
+ ``dtype = format_parser(formats, names, titles).dtype``
+
+ Attributes
+ ----------
+ dtype : dtype
+ The converted data-type.
+
+ Parameters
+ ----------
+ formats : str or list of str
+ The format description, either specified as a string with
+ comma-separated format descriptions in the form ``'f8, i4, a5'``, or
+ a list of format description strings in the form
+ ``['f8', 'i4', 'a5']``.
+ names : str or list/tuple of str
+ The field names, either specified as a comma-separated string in the
+ form ``'col1, col2, col3'``, or as a list or tuple of strings in the
+ form ``['col1', 'col2', 'col3']``.
+ An empty list can be used, in that case default field names
+ ('f0', 'f1', ...) are used.
+ titles : sequence
+ Sequence of title strings. An empty list can be used to leave titles
+ out.
+ aligned : bool, optional
+ If True, align the fields by padding as the C-compiler would.
+ Default is False.
+ byteorder : str, optional
+ If specified, all the fields will be changed to the
+ provided byte-order. Otherwise, the default byte-order is
+ used. For all available string specifiers, see `dtype.newbyteorder`.
+
+ See Also
+ --------
+ dtype, typename, sctype2char
+
+ Examples
+ --------
+ >>> np.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'],
+ ... ['T1', 'T2', 'T3']).dtype
+    dtype([(('T1', 'col1'), '<f8'), (('T2', 'col2'), '<i4'),
+           (('T3', 'col3'), '|S5')])
+
+    `names` and/or `titles` can be empty lists. If `titles` is an empty
+    list, titles will simply not appear. If `names` is empty, default
+    field names will be used.
+
+    >>> np.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'],
+    ...                  []).dtype
+    dtype([('col1', '<f8'), ('col2', '<i4'), ('col3', '|S5')])
+    >>> np.format_parser(['f8', 'i4', 'a5'], [], []).dtype
+    dtype([('f0', '<f8'), ('f1', '<i4'), ('f2', '|S5')])
+
+    """
+
+    def __init__(self, formats, names, titles, aligned=False, byteorder=None):
+        self._parseFormats(formats, aligned)
+        self._setfieldnames(names, titles)
+        self._createdescr(byteorder)
+        self.dtype = self._descr
+
+    def _parseFormats(self, formats, aligned=0):
+        """ Parse the field formats """
+
+        if formats is None:
+            raise ValueError("Need formats argument")
+        if isinstance(formats, list):
+            if len(formats) < 2:
+                formats.append('')
+            formats = ','.join(formats)
+        dtype = sb.dtype(formats, aligned)
+        fields = dtype.fields
+        if fields is None:
+            dtype = sb.dtype([('f1', dtype)], aligned)
+            fields = dtype.fields
+        keys = dtype.names
+        self._f_formats = [fields[key][0] for key in keys]
+        self._offsets = [fields[key][1] for key in keys]
+        self._nfields = len(keys)
+
+    def _setfieldnames(self, names, titles):
+        """convert input field names into a list and assign to the _names
+        attribute """
+
+        if (names):
+            if (type(names) in [list, tuple]):
+                pass
+            elif isinstance(names, str):
+                names = names.split(',')
+            else:
+                raise NameError("illegal input names %s" % repr(names))
+
+            self._names = [n.strip() for n in names[:self._nfields]]
+        else:
+            self._names = []
+
+        # if the names are not specified, they will be assigned as
+        #  "f0, f1, f2,..."
+        # if not enough names are specified, they will be assigned as "f0,
+        #  f1, f2,..." in the fields not specified by names
+        self._names += ['f%d' % i for i in range(len(self._names),
+                                                 self._nfields)]
+
+        # check for redundant names
+        _dup = find_duplicate(self._names)
+        if _dup:
+            raise ValueError("Duplicate field names: %s" % _dup)
+
+        if (titles):
+            self._titles = [n.strip() for n in titles[:self._nfields]]
+        else:
+            self._titles = []
+            titles = []
+
+        if (self._nfields > len(titles)):
+ self._titles += [None] * (self._nfields - len(titles))
+
+ def _createdescr(self, byteorder):
+ descr = sb.dtype({'names':self._names,
+ 'formats':self._f_formats,
+ 'offsets':self._offsets,
+ 'titles':self._titles})
+ if (byteorder is not None):
+ byteorder = _byteorderconv[byteorder[0]]
+ descr = descr.newbyteorder(byteorder)
+
+ self._descr = descr
+
+class record(nt.void):
+ """A data-type scalar that allows field access as attribute lookup.
+ """
+
+ # manually set name and module so that this class's type shows up
+ # as numpy.record when printed
+ __name__ = 'record'
+ __module__ = 'numpy'
+
+ def __repr__(self):
+ if get_printoptions()['legacy'] == '1.13':
+ return self.__str__()
+ return super(record, self).__repr__()
+
+ def __str__(self):
+ if get_printoptions()['legacy'] == '1.13':
+ return str(self.item())
+ return super(record, self).__str__()
+
+ def __getattribute__(self, attr):
+ if attr in ['setfield', 'getfield', 'dtype']:
+ return nt.void.__getattribute__(self, attr)
+ try:
+ return nt.void.__getattribute__(self, attr)
+ except AttributeError:
+ pass
+ fielddict = nt.void.__getattribute__(self, 'dtype').fields
+ res = fielddict.get(attr, None)
+ if res:
+ obj = self.getfield(*res[:2])
+ # if it has fields return a record,
+ # otherwise return the object
+ try:
+ dt = obj.dtype
+ except AttributeError:
+ #happens if field is Object type
+ return obj
+ if dt.fields:
+ return obj.view((self.__class__, obj.dtype.fields))
+ return obj
+ else:
+ raise AttributeError("'record' object has no "
+ "attribute '%s'" % attr)
+
+ def __setattr__(self, attr, val):
+ if attr in ['setfield', 'getfield', 'dtype']:
+ raise AttributeError("Cannot set '%s' attribute" % attr)
+ fielddict = nt.void.__getattribute__(self, 'dtype').fields
+ res = fielddict.get(attr, None)
+ if res:
+ return self.setfield(val, *res[:2])
+ else:
+ if getattr(self, attr, None):
+ return nt.void.__setattr__(self, attr, val)
+ else:
+ raise AttributeError("'record' object has no "
+ "attribute '%s'" % attr)
+
+ def __getitem__(self, indx):
+ obj = nt.void.__getitem__(self, indx)
+
+ # copy behavior of record.__getattribute__,
+ if isinstance(obj, nt.void) and obj.dtype.fields:
+ return obj.view((self.__class__, obj.dtype.fields))
+ else:
+ # return a single element
+ return obj
+
+ def pprint(self):
+ """Pretty-print all fields."""
+ # pretty-print all fields
+ names = self.dtype.names
+ maxlen = max(len(name) for name in names)
+ fmt = '%% %ds: %%s' % maxlen
+ rows = [fmt % (name, getattr(self, name)) for name in names]
+ return "\n".join(rows)
+
+# The recarray is almost identical to a standard array (which supports
+# named fields already) The biggest difference is that it can use
+# attribute-lookup to find the fields and it is constructed using
+# a record.
+
+# If byteorder is given it forces a particular byteorder on all
+# the fields (and any subfields)
+
+class recarray(ndarray):
+ """Construct an ndarray that allows field access using attributes.
+
+    Arrays may have a data-type containing fields, analogous
+    to columns in a spreadsheet. An example is ``[(x, int), (y, float)]``,
+ where each entry in the array is a pair of ``(int, float)``. Normally,
+ these attributes are accessed using dictionary lookups such as ``arr['x']``
+ and ``arr['y']``. Record arrays allow the fields to be accessed as members
+ of the array, using ``arr.x`` and ``arr.y``.
+
+ Parameters
+ ----------
+ shape : tuple
+ Shape of output array.
+ dtype : data-type, optional
+ The desired data-type. By default, the data-type is determined
+ from `formats`, `names`, `titles`, `aligned` and `byteorder`.
+ formats : list of data-types, optional
+ A list containing the data-types for the different columns, e.g.
+ ``['i4', 'f8', 'i4']``. `formats` does *not* support the new
+ convention of using types directly, i.e. ``(int, float, int)``.
+ Note that `formats` must be a list, not a tuple.
+ Given that `formats` is somewhat limited, we recommend specifying
+ `dtype` instead.
+ names : tuple of str, optional
+ The name of each column, e.g. ``('x', 'y', 'z')``.
+ buf : buffer, optional
+ By default, a new array is created of the given shape and data-type.
+ If `buf` is specified and is an object exposing the buffer interface,
+ the array will use the memory from the existing buffer. In this case,
+ the `offset` and `strides` keywords are available.
+
+ Other Parameters
+ ----------------
+ titles : tuple of str, optional
+ Aliases for column names. For example, if `names` were
+ ``('x', 'y', 'z')`` and `titles` is
+ ``('x_coordinate', 'y_coordinate', 'z_coordinate')``, then
+ ``arr['x']`` is equivalent to both ``arr.x`` and ``arr.x_coordinate``.
+ byteorder : {'<', '>', '='}, optional
+ Byte-order for all fields.
+ aligned : bool, optional
+ Align the fields in memory as the C-compiler would.
+ strides : tuple of ints, optional
+ Buffer (`buf`) is interpreted according to these strides (strides
+ define how many bytes each array element, row, column, etc.
+ occupy in memory).
+ offset : int, optional
+ Start reading buffer (`buf`) from this offset onwards.
+ order : {'C', 'F'}, optional
+ Row-major (C-style) or column-major (Fortran-style) order.
+
+ Returns
+ -------
+ rec : recarray
+ Empty array of the given shape and type.
+
+ See Also
+ --------
+ rec.fromrecords : Construct a record array from data.
+ record : fundamental data-type for `recarray`.
+ format_parser : determine a data-type from formats, names, titles.
+
+ Notes
+ -----
+ This constructor can be compared to ``empty``: it creates a new record
+ array but does not fill it with data. To create a record array from data,
+ use one of the following methods:
+
+ 1. Create a standard ndarray and convert it to a record array,
+ using ``arr.view(np.recarray)``
+ 2. Use the `buf` keyword.
+ 3. Use `np.rec.fromrecords`.
+
+ Examples
+ --------
+ Create an array with two fields, ``x`` and ``y``:
+
+ >>> x = np.array([(1.0, 2), (3.0, 4)], dtype=[('x', float), ('y', int)])
+ >>> x
+ array([(1.0, 2), (3.0, 4)],
+          dtype=[('x', '<f8'), ('y', '<i8')])
+
+    >>> x['x']
+ array([ 1., 3.])
+
+ View the array as a record array:
+
+ >>> x = x.view(np.recarray)
+
+ >>> x.x
+ array([ 1., 3.])
+
+ >>> x.y
+ array([2, 4])
+
+ Create a new, empty record array:
+
+ >>> np.recarray((2,),
+ ... dtype=[('x', int), ('y', float), ('z', int)]) #doctest: +SKIP
+ rec.array([(-1073741821, 1.2249118382103472e-301, 24547520),
+ (3471280, 1.2134086255804012e-316, 0)],
+           dtype=[('x', '<i4'), ('y', '<f8'), ('z', '<i4')])
+
+    """
+
+    def __new__(subtype, shape, dtype=None, buf=None, offset=0, strides=None,
+                formats=None, names=None, titles=None,
+                byteorder=None, aligned=False, order='C'):
+
+        if dtype is not None:
+            descr = sb.dtype(dtype)
+        else:
+            descr = format_parser(formats, names, titles, aligned, byteorder)._descr
+
+        if buf is None:
+            self = ndarray.__new__(subtype, shape, (record, descr), order=order)
+        else:
+            self = ndarray.__new__(subtype, shape, (record, descr),
+                                   buf=buf, offset=offset,
+                                   strides=strides, order=order)
+        return self
+
+    def __repr__(self):
+
+        repr_dtype = self.dtype
+        if self.dtype.type is record or not issubclass(self.dtype.type, nt.void):
+            # If this is a full record array (has numpy.record dtype),
+            # or if it has a scalar (non-void) dtype with no records,
+            # represent it using the rec.array function. Since rec.array
+            # converts dtype to a numpy.record for us, convert back
+            # to non-record before printing
+            if repr_dtype.type is record:
+                repr_dtype = sb.dtype((nt.void, repr_dtype))
+            prefix = "rec.array("
+            fmt = 'rec.array(%s,%sdtype=%s)'
+        else:
+            # otherwise represent it using np.array plus a view
+            # This should only happen if the user is playing
+            # strange games with dtypes.
+            prefix = "array("
+            fmt = 'array(%s,%sdtype=%s).view(numpy.recarray)'
+
+        # get data/shape string. logic taken from numeric.array_repr
+        if self.size > 0 or self.shape == (0,):
+ lst = sb.array2string(
+ self, separator=', ', prefix=prefix, suffix=',')
+ else:
+ # show zero-length shape unless it is (0,)
+ lst = "[], shape=%s" % (repr(self.shape),)
+
+ lf = '\n'+' '*len(prefix)
+ if get_printoptions()['legacy'] == '1.13':
+ lf = ' ' + lf # trailing space
+ return fmt % (lst, lf, repr_dtype)
+
+ def field(self, attr, val=None):
+ if isinstance(attr, int):
+ names = ndarray.__getattribute__(self, 'dtype').names
+ attr = names[attr]
+
+ fielddict = ndarray.__getattribute__(self, 'dtype').fields
+
+ res = fielddict[attr][:2]
+
+ if val is None:
+ obj = self.getfield(*res)
+ if obj.dtype.fields:
+ return obj
+ return obj.view(ndarray)
+ else:
+ return self.setfield(val, *res)
+
+
+def fromarrays(arrayList, dtype=None, shape=None, formats=None,
+ names=None, titles=None, aligned=False, byteorder=None):
+ """ create a record array from a (flat) list of arrays
+
+ >>> x1=np.array([1,2,3,4])
+ >>> x2=np.array(['a','dd','xyz','12'])
+ >>> x3=np.array([1.1,2,3,4])
+ >>> r = np.core.records.fromarrays([x1,x2,x3],names='a,b,c')
+ >>> print(r[1])
+ (2, 'dd', 2.0)
+ >>> x1[1]=34
+ >>> r.a
+ array([1, 2, 3, 4])
+ """
+
+ arrayList = [sb.asarray(x) for x in arrayList]
+
+ if shape is None or shape == 0:
+ shape = arrayList[0].shape
+
+ if isinstance(shape, int):
+ shape = (shape,)
+
+ if formats is None and dtype is None:
+ # go through each object in the list to see if it is an ndarray
+ # and determine the formats.
+ formats = []
+ for obj in arrayList:
+ if not isinstance(obj, ndarray):
+ raise ValueError("item in the array list must be an ndarray.")
+ formats.append(obj.dtype.str)
+ formats = ','.join(formats)
+
+ if dtype is not None:
+ descr = sb.dtype(dtype)
+ _names = descr.names
+ else:
+ parsed = format_parser(formats, names, titles, aligned, byteorder)
+ _names = parsed._names
+ descr = parsed._descr
+
+ # Determine shape from data-type.
+ if len(descr) != len(arrayList):
+ raise ValueError("mismatch between the number of fields "
+ "and the number of arrays")
+
+ d0 = descr[0].shape
+ nn = len(d0)
+ if nn > 0:
+ shape = shape[:-nn]
+
+ for k, obj in enumerate(arrayList):
+ nn = descr[k].ndim
+ testshape = obj.shape[:obj.ndim - nn]
+ if testshape != shape:
+ raise ValueError("array-shape mismatch in array %d" % k)
+
+ _array = recarray(shape, descr)
+
+ # populate the record array (makes a copy)
+ for i in range(len(arrayList)):
+ _array[_names[i]] = arrayList[i]
+
+ return _array
+
+def fromrecords(recList, dtype=None, shape=None, formats=None, names=None,
+ titles=None, aligned=False, byteorder=None):
+ """ create a recarray from a list of records in text form
+
+    The data in the same field can be heterogeneous; they will be promoted
+    to the highest data type. This method is intended for creating
+    smaller record arrays. If used to create a large array without formats
+    defined, e.g.
+
+ r=fromrecords([(2,3.,'abc')]*100000)
+
+ it can be slow.
+
+ If formats is None, then this will auto-detect formats. Use list of
+ tuples rather than list of lists for faster processing.
+
+ >>> r=np.core.records.fromrecords([(456,'dbe',1.2),(2,'de',1.3)],
+ ... names='col1,col2,col3')
+ >>> print(r[0])
+ (456, 'dbe', 1.2)
+ >>> r.col1
+ array([456, 2])
+ >>> r.col2
+ array(['dbe', 'de'],
+ dtype='|S3')
+ >>> import pickle
+ >>> print(pickle.loads(pickle.dumps(r)))
+ [(456, 'dbe', 1.2) (2, 'de', 1.3)]
+ """
+
+ if formats is None and dtype is None: # slower
+ obj = sb.array(recList, dtype=object)
+ arrlist = [sb.array(obj[..., i].tolist()) for i in range(obj.shape[-1])]
+ return fromarrays(arrlist, formats=formats, shape=shape, names=names,
+ titles=titles, aligned=aligned, byteorder=byteorder)
+
+ if dtype is not None:
+ descr = sb.dtype((record, dtype))
+ else:
+ descr = format_parser(formats, names, titles, aligned, byteorder)._descr
+
+ try:
+ retval = sb.array(recList, dtype=descr)
+ except (TypeError, ValueError):
+ if (shape is None or shape == 0):
+ shape = len(recList)
+ if isinstance(shape, (int, long)):
+ shape = (shape,)
+ if len(shape) > 1:
+ raise ValueError("Can only deal with 1-d array.")
+ _array = recarray(shape, descr)
+ for k in range(_array.size):
+ _array[k] = tuple(recList[k])
+ # list of lists instead of list of tuples ?
+ # 2018-02-07, 1.14.1
+ warnings.warn(
+ "fromrecords expected a list of tuples, may have received a list "
+ "of lists instead. In the future that will raise an error",
+ FutureWarning, stacklevel=2)
+ return _array
+ else:
+ if shape is not None and retval.shape != shape:
+ retval.shape = shape
+
+ res = retval.view(recarray)
+
+ return res
+
+
+def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None,
+ names=None, titles=None, aligned=False, byteorder=None):
+ """ create a (read-only) record array from binary data contained in
+ a string"""
+
+ if dtype is None and formats is None:
+ raise TypeError("fromstring() needs a 'dtype' or 'formats' argument")
+
+ if dtype is not None:
+ descr = sb.dtype(dtype)
+ else:
+ descr = format_parser(formats, names, titles, aligned, byteorder)._descr
+
+ itemsize = descr.itemsize
+ if (shape is None or shape == 0 or shape == -1):
+ shape = (len(datastring) - offset) // itemsize
+
+ _array = recarray(shape, descr, buf=datastring, offset=offset)
+ return _array
+
+def get_remaining_size(fd):
+ try:
+ fn = fd.fileno()
+ except AttributeError:
+ return os.path.getsize(fd.name) - fd.tell()
+ st = os.fstat(fn)
+ size = st.st_size - fd.tell()
+ return size
+
+def fromfile(fd, dtype=None, shape=None, offset=0, formats=None,
+ names=None, titles=None, aligned=False, byteorder=None):
+ """Create an array from binary file data
+
+ If file is a string or a path-like object then that file is opened,
+ else it is assumed to be a file object. The file object must
+ support random access (i.e. it must have tell and seek methods).
+
+ >>> from tempfile import TemporaryFile
+ >>> a = np.empty(10,dtype='f8,i4,a5')
+ >>> a[5] = (0.5,10,'abcde')
+ >>>
+ >>> fd=TemporaryFile()
+ >>> a = a.newbyteorder('<')
+ >>> a.tofile(fd)
+ >>>
+ >>> fd.seek(0)
+ >>> r=np.core.records.fromfile(fd, formats='f8,i4,a5', shape=10,
+ ... byteorder='<')
+ >>> print(r[5])
+ (0.5, 10, 'abcde')
+ >>> r.shape
+ (10,)
+ """
+
+ if dtype is None and formats is None:
+ raise TypeError("fromfile() needs a 'dtype' or 'formats' argument")
+
+ if (shape is None or shape == 0):
+ shape = (-1,)
+ elif isinstance(shape, (int, long)):
+ shape = (shape,)
+
+ if isfileobj(fd):
+ # file already opened
+ name = 0
+ else:
+ # open file
+ fd = open(os_fspath(fd), 'rb')
+ name = 1
+
+ if (offset > 0):
+ fd.seek(offset, 1)
+ size = get_remaining_size(fd)
+
+ if dtype is not None:
+ descr = sb.dtype(dtype)
+ else:
+ descr = format_parser(formats, names, titles, aligned, byteorder)._descr
+
+ itemsize = descr.itemsize
+
+ shapeprod = sb.array(shape).prod(dtype=nt.intp)
+ shapesize = shapeprod * itemsize
+ if shapesize < 0:
+ shape = list(shape)
+ shape[shape.index(-1)] = size // -shapesize
+ shape = tuple(shape)
+ shapeprod = sb.array(shape).prod(dtype=nt.intp)
+
+ nbytes = shapeprod * itemsize
+
+ if nbytes > size:
+ raise ValueError(
+ "Not enough bytes left in file for specified shape and type")
+
+ # create the array
+ _array = recarray(shape, descr)
+ nbytesread = fd.readinto(_array.data)
+ if nbytesread != nbytes:
+ raise IOError("Didn't read as many bytes as expected")
+ if name:
+ fd.close()
+
+ return _array
+
+def array(obj, dtype=None, shape=None, offset=0, strides=None, formats=None,
+ names=None, titles=None, aligned=False, byteorder=None, copy=True):
+ """Construct a record array from a wide-variety of objects.
+ """
+
+ if ((isinstance(obj, (type(None), str)) or isfileobj(obj)) and
+ (formats is None) and (dtype is None)):
+ raise ValueError("Must define formats (or dtype) if object is "
+ "None, string, or an open file")
+
+ kwds = {}
+ if dtype is not None:
+ dtype = sb.dtype(dtype)
+ elif formats is not None:
+ dtype = format_parser(formats, names, titles,
+ aligned, byteorder)._descr
+ else:
+ kwds = {'formats': formats,
+ 'names': names,
+ 'titles': titles,
+ 'aligned': aligned,
+ 'byteorder': byteorder
+ }
+
+ if obj is None:
+ if shape is None:
+ raise ValueError("Must define a shape if obj is None")
+ return recarray(shape, dtype, buf=obj, offset=offset, strides=strides)
+
+ elif isinstance(obj, bytes):
+ return fromstring(obj, dtype, shape=shape, offset=offset, **kwds)
+
+ elif isinstance(obj, (list, tuple)):
+ if isinstance(obj[0], (tuple, list)):
+ return fromrecords(obj, dtype=dtype, shape=shape, **kwds)
+ else:
+ return fromarrays(obj, dtype=dtype, shape=shape, **kwds)
+
+ elif isinstance(obj, recarray):
+ if dtype is not None and (obj.dtype != dtype):
+ new = obj.view(dtype)
+ else:
+ new = obj
+ if copy:
+ new = new.copy()
+ return new
+
+ elif isfileobj(obj):
+ return fromfile(obj, dtype=dtype, shape=shape, offset=offset)
+
+ elif isinstance(obj, ndarray):
+ if dtype is not None and (obj.dtype != dtype):
+ new = obj.view(dtype)
+ else:
+ new = obj
+ if copy:
+ new = new.copy()
+ return new.view(recarray)
+
+ else:
+ interface = getattr(obj, "__array_interface__", None)
+ if interface is None or not isinstance(interface, dict):
+ raise ValueError("Unknown input type")
+ obj = sb.array(obj)
+ if dtype is not None and (obj.dtype != dtype):
+ obj = obj.view(dtype)
+ return obj.view(recarray)
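+
+# A short sketch of the dispatch performed by array() above (illustrative):
+# a list of tuples is routed to fromrecords, bytes to fromstring, an open
+# file to fromfile, and so on.
+# >>> import numpy as np
+# >>> r = np.core.records.array([(1, 2.0), (3, 4.0)], names='x,y',
+# ...                           formats='i4,f8')
+# >>> r.x
+# array([1, 3], dtype=int32)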
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/records.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/records.pyc
new file mode 100644
index 0000000..e08c31c
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/records.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/setup.py b/project/venv/lib/python2.7/site-packages/numpy/core/setup.py
new file mode 100644
index 0000000..9ccca62
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/setup.py
@@ -0,0 +1,974 @@
+from __future__ import division, print_function
+
+import os
+import sys
+import pickle
+import copy
+import warnings
+import platform
+from os.path import join
+from numpy.distutils import log
+from distutils.dep_util import newer
+from distutils.sysconfig import get_config_var
+from numpy._build_utils.apple_accelerate import (
+ uses_accelerate_framework, get_sgemv_fix
+ )
+from numpy.compat import npy_load_module
+from setup_common import *
+
+# Set to True to enable relaxed strides checking. This (mostly) means
+# that `strides[dim]` is ignored if `shape[dim] == 1` when setting flags.
+NPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "1") != "0")
+
+# Put NPY_RELAXED_STRIDES_DEBUG=1 in the environment if you want numpy to use a
+# bogus value for affected strides in order to help smoke out bad stride usage
+# when relaxed stride checking is enabled.
+NPY_RELAXED_STRIDES_DEBUG = (os.environ.get('NPY_RELAXED_STRIDES_DEBUG', "0") != "0")
+NPY_RELAXED_STRIDES_DEBUG = NPY_RELAXED_STRIDES_DEBUG and NPY_RELAXED_STRIDES_CHECKING
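+# e.g. building with ``NPY_RELAXED_STRIDES_CHECKING=0 python setup.py build``
+# turns the flag off; both variables are read once, here at import time.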
+
+# XXX: ugly, we use a class to avoid calling twice some expensive functions in
+# config.h/numpyconfig.h. I don't see a better way because distutils forces
+# config.h generation inside an Extension class, and as such sharing
+# configuration information between extensions is not easy.
+# Using a pickle-based memoize does not work because config_cmd is an instance
+# method, which cPickle does not like.
+#
+# Use pickle in all cases, as cPickle is gone in python3 and the difference
+# in time is only in build. -- Charles Harris, 2013-03-30
+
+class CallOnceOnly(object):
+ def __init__(self):
+ self._check_types = None
+ self._check_ieee_macros = None
+ self._check_complex = None
+
+ def check_types(self, *a, **kw):
+ if self._check_types is None:
+ out = check_types(*a, **kw)
+ self._check_types = pickle.dumps(out)
+ else:
+ out = copy.deepcopy(pickle.loads(self._check_types))
+ return out
+
+ def check_ieee_macros(self, *a, **kw):
+ if self._check_ieee_macros is None:
+ out = check_ieee_macros(*a, **kw)
+ self._check_ieee_macros = pickle.dumps(out)
+ else:
+ out = copy.deepcopy(pickle.loads(self._check_ieee_macros))
+ return out
+
+ def check_complex(self, *a, **kw):
+ if self._check_complex is None:
+ out = check_complex(*a, **kw)
+ self._check_complex = pickle.dumps(out)
+ else:
+ out = copy.deepcopy(pickle.loads(self._check_complex))
+ return out
+
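+# The pickle round-trip in CallOnceOnly hands every caller an independent
+# copy of the cached result, so mutating the returned value cannot corrupt
+# the cache. A minimal sketch of the same pattern (illustrative only):
+# >>> import pickle, copy
+# >>> cached = pickle.dumps(['HAVE_FOO'])          # store once
+# >>> out = copy.deepcopy(pickle.loads(cached))    # fresh copy per call
+# >>> out.append('HAVE_BAR')                       # cache stays untouched
+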
+def pythonlib_dir():
+ """return path where libpython* is."""
+ if sys.platform == 'win32':
+ return os.path.join(sys.prefix, "libs")
+ else:
+ return get_config_var('LIBDIR')
+
+def is_npy_no_signal():
+ """Return True if the NPY_NO_SIGNAL symbol must be defined in configuration
+ header."""
+ return sys.platform == 'win32'
+
+def is_npy_no_smp():
+ """Return True if the NPY_NO_SMP symbol must be defined in public
+ header (when SMP support cannot be reliably enabled)."""
+    # Perhaps a fancier check is in order here,
+    # so that threads are only enabled if there
+    # are actually multiple CPUs -- but
+    # threaded code can be nice even on a single
+    # CPU, so that long-running code doesn't
+    # block.
+ return 'NPY_NOSMP' in os.environ
+
+def win32_checks(deflist):
+ from numpy.distutils.misc_util import get_build_architecture
+ a = get_build_architecture()
+
+ # Distutils hack on AMD64 on windows
+ print('BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' %
+ (a, os.name, sys.platform))
+ if a == 'AMD64':
+ deflist.append('DISTUTILS_USE_SDK')
+
+ # On win32, force long double format string to be 'g', not
+ # 'Lg', since the MS runtime does not support long double whose
+ # size is > sizeof(double)
+ if a == "Intel" or a == "AMD64":
+ deflist.append('FORCE_NO_LONG_DOUBLE_FORMATTING')
+
+def check_math_capabilities(config, moredefs, mathlibs):
+ def check_func(func_name):
+ return config.check_func(func_name, libraries=mathlibs,
+ decl=True, call=True)
+
+ def check_funcs_once(funcs_name):
+ decl = dict([(f, True) for f in funcs_name])
+ st = config.check_funcs_once(funcs_name, libraries=mathlibs,
+ decl=decl, call=decl)
+ if st:
+ moredefs.extend([(fname2def(f), 1) for f in funcs_name])
+ return st
+
+ def check_funcs(funcs_name):
+ # Use check_funcs_once first, and if it does not work, test func per
+ # func. Return success only if all the functions are available
+ if not check_funcs_once(funcs_name):
+ # Global check failed, check func per func
+ for f in funcs_name:
+ if check_func(f):
+ moredefs.append((fname2def(f), 1))
+ return 0
+ else:
+ return 1
+
+ #use_msvc = config.check_decl("_MSC_VER")
+
+ if not check_funcs_once(MANDATORY_FUNCS):
+ raise SystemError("One of the required function to build numpy is not"
+ " available (the list is %s)." % str(MANDATORY_FUNCS))
+
+ # Standard functions which may not be available and for which we have a
+ # replacement implementation. Note that some of these are C99 functions.
+
+    # XXX: hack to circumvent cpp pollution from python: python puts its
+    # config.h in the public namespace, so we have a clash for the common
+    # functions we test. We remove every function tested by python's
+    # autoconf, hoping their own tests are correct
+ for f in OPTIONAL_STDFUNCS_MAYBE:
+ if config.check_decl(fname2def(f),
+ headers=["Python.h", "math.h"]):
+ OPTIONAL_STDFUNCS.remove(f)
+
+ check_funcs(OPTIONAL_STDFUNCS)
+
+ for h in OPTIONAL_HEADERS:
+ if config.check_func("", decl=False, call=False, headers=[h]):
+ h = h.replace(".", "_").replace(os.path.sep, "_")
+ moredefs.append((fname2def(h), 1))
+
+ for tup in OPTIONAL_INTRINSICS:
+ headers = None
+ if len(tup) == 2:
+ f, args, m = tup[0], tup[1], fname2def(tup[0])
+ elif len(tup) == 3:
+ f, args, headers, m = tup[0], tup[1], [tup[2]], fname2def(tup[0])
+ else:
+ f, args, headers, m = tup[0], tup[1], [tup[2]], fname2def(tup[3])
+ if config.check_func(f, decl=False, call=True, call_args=args,
+ headers=headers):
+ moredefs.append((m, 1))
+
+ for dec, fn in OPTIONAL_FUNCTION_ATTRIBUTES:
+ if config.check_gcc_function_attribute(dec, fn):
+ moredefs.append((fname2def(fn), 1))
+
+ for fn in OPTIONAL_VARIABLE_ATTRIBUTES:
+ if config.check_gcc_variable_attribute(fn):
+ m = fn.replace("(", "_").replace(")", "_")
+ moredefs.append((fname2def(m), 1))
+
+ # C99 functions: float and long double versions
+ check_funcs(C99_FUNCS_SINGLE)
+ check_funcs(C99_FUNCS_EXTENDED)
+
+def check_complex(config, mathlibs):
+ priv = []
+ pub = []
+
+ try:
+ if os.uname()[0] == "Interix":
+ warnings.warn("Disabling broken complex support. See #1365", stacklevel=2)
+ return priv, pub
+ except Exception:
+        # os.uname is not available on all platforms; a blanket except is ugly but safe
+ pass
+
+ # Check for complex support
+ st = config.check_header('complex.h')
+ if st:
+ priv.append(('HAVE_COMPLEX_H', 1))
+ pub.append(('NPY_USE_C99_COMPLEX', 1))
+
+ for t in C99_COMPLEX_TYPES:
+ st = config.check_type(t, headers=["complex.h"])
+ if st:
+ pub.append(('NPY_HAVE_%s' % type2def(t), 1))
+
+ def check_prec(prec):
+ flist = [f + prec for f in C99_COMPLEX_FUNCS]
+ decl = dict([(f, True) for f in flist])
+ if not config.check_funcs_once(flist, call=decl, decl=decl,
+ libraries=mathlibs):
+ for f in flist:
+ if config.check_func(f, call=True, decl=True,
+ libraries=mathlibs):
+ priv.append((fname2def(f), 1))
+ else:
+ priv.extend([(fname2def(f), 1) for f in flist])
+
+ check_prec('')
+ check_prec('f')
+ check_prec('l')
+
+ return priv, pub
+
+def check_ieee_macros(config):
+ priv = []
+ pub = []
+
+ macros = []
+
+ def _add_decl(f):
+ priv.append(fname2def("decl_%s" % f))
+ pub.append('NPY_%s' % fname2def("decl_%s" % f))
+
+    # XXX: hack to circumvent cpp pollution from python: python puts its
+    # config.h in the public namespace, so we have a clash for the common
+    # functions we test. We remove every function tested by python's
+    # autoconf, hoping their own tests are correct
+ _macros = ["isnan", "isinf", "signbit", "isfinite"]
+ for f in _macros:
+ py_symbol = fname2def("decl_%s" % f)
+ already_declared = config.check_decl(py_symbol,
+ headers=["Python.h", "math.h"])
+ if already_declared:
+ if config.check_macro_true(py_symbol,
+ headers=["Python.h", "math.h"]):
+ pub.append('NPY_%s' % fname2def("decl_%s" % f))
+ else:
+ macros.append(f)
+    # Normally, isnan and isinf are macros (C99), but some platforms only have
+    # the function, or both the function and the macro. Check for the macro
+    # only, and define replacement ones if not found.
+    # Note: including Python.h is necessary because it modifies some math.h
+    # definitions
+ for f in macros:
+ st = config.check_decl(f, headers=["Python.h", "math.h"])
+ if st:
+ _add_decl(f)
+
+ return priv, pub
+
+def check_types(config_cmd, ext, build_dir):
+ private_defines = []
+ public_defines = []
+
+ # Expected size (in number of bytes) for each type. This is an
+ # optimization: those are only hints, and an exhaustive search for the size
+ # is done if the hints are wrong.
+ expected = {'short': [2], 'int': [4], 'long': [8, 4],
+ 'float': [4], 'double': [8], 'long double': [16, 12, 8],
+ 'Py_intptr_t': [8, 4], 'PY_LONG_LONG': [8], 'long long': [8],
+ 'off_t': [8, 4]}
+
+ # Check we have the python header (-dev* packages on Linux)
+ result = config_cmd.check_header('Python.h')
+ if not result:
+ python = 'python'
+ if '__pypy__' in sys.builtin_module_names:
+ python = 'pypy'
+ raise SystemError(
+ "Cannot compile 'Python.h'. Perhaps you need to "
+ "install {0}-dev|{0}-devel.".format(python))
+ res = config_cmd.check_header("endian.h")
+ if res:
+ private_defines.append(('HAVE_ENDIAN_H', 1))
+ public_defines.append(('NPY_HAVE_ENDIAN_H', 1))
+ res = config_cmd.check_header("sys/endian.h")
+ if res:
+ private_defines.append(('HAVE_SYS_ENDIAN_H', 1))
+ public_defines.append(('NPY_HAVE_SYS_ENDIAN_H', 1))
+
+ # Check basic types sizes
+ for type in ('short', 'int', 'long'):
+ res = config_cmd.check_decl("SIZEOF_%s" % sym2def(type), headers=["Python.h"])
+ if res:
+ public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), "SIZEOF_%s" % sym2def(type)))
+ else:
+ res = config_cmd.check_type_size(type, expected=expected[type])
+ if res >= 0:
+ public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
+ else:
+ raise SystemError("Checking sizeof (%s) failed !" % type)
+
+ for type in ('float', 'double', 'long double'):
+ already_declared = config_cmd.check_decl("SIZEOF_%s" % sym2def(type),
+ headers=["Python.h"])
+ res = config_cmd.check_type_size(type, expected=expected[type])
+ if res >= 0:
+ public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
+ if not already_declared and not type == 'long double':
+ private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))
+ else:
+ raise SystemError("Checking sizeof (%s) failed !" % type)
+
+ # Compute size of corresponding complex type: used to check that our
+ # definition is binary compatible with C99 complex type (check done at
+ # build time in npy_common.h)
+ complex_def = "struct {%s __x; %s __y;}" % (type, type)
+ res = config_cmd.check_type_size(complex_def,
+ expected=[2 * x for x in expected[type]])
+ if res >= 0:
+ public_defines.append(('NPY_SIZEOF_COMPLEX_%s' % sym2def(type), '%d' % res))
+ else:
+ raise SystemError("Checking sizeof (%s) failed !" % complex_def)
+
+ for type in ('Py_intptr_t', 'off_t'):
+ res = config_cmd.check_type_size(type, headers=["Python.h"],
+ library_dirs=[pythonlib_dir()],
+ expected=expected[type])
+
+ if res >= 0:
+ private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))
+ public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
+ else:
+ raise SystemError("Checking sizeof (%s) failed !" % type)
+
+ # We check declaration AND type because that's how distutils does it.
+ if config_cmd.check_decl('PY_LONG_LONG', headers=['Python.h']):
+ res = config_cmd.check_type_size('PY_LONG_LONG', headers=['Python.h'],
+ library_dirs=[pythonlib_dir()],
+ expected=expected['PY_LONG_LONG'])
+ if res >= 0:
+ private_defines.append(('SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))
+ public_defines.append(('NPY_SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))
+ else:
+ raise SystemError("Checking sizeof (%s) failed !" % 'PY_LONG_LONG')
+
+ res = config_cmd.check_type_size('long long',
+ expected=expected['long long'])
+ if res >= 0:
+ #private_defines.append(('SIZEOF_%s' % sym2def('long long'), '%d' % res))
+ public_defines.append(('NPY_SIZEOF_%s' % sym2def('long long'), '%d' % res))
+ else:
+ raise SystemError("Checking sizeof (%s) failed !" % 'long long')
+
+ if not config_cmd.check_decl('CHAR_BIT', headers=['Python.h']):
+ raise RuntimeError(
+ "Config wo CHAR_BIT is not supported"
+ ", please contact the maintainers")
+
+ return private_defines, public_defines
+
+def check_mathlib(config_cmd):
+ # Testing the C math library
+ mathlibs = []
+ mathlibs_choices = [[], ['m'], ['cpml']]
+ mathlib = os.environ.get('MATHLIB')
+ if mathlib:
+ mathlibs_choices.insert(0, mathlib.split(','))
+ for libs in mathlibs_choices:
+ if config_cmd.check_func("exp", libraries=libs, decl=True, call=True):
+ mathlibs = libs
+ break
+ else:
+ raise EnvironmentError("math library missing; rerun "
+ "setup.py after setting the "
+ "MATHLIB env variable")
+ return mathlibs
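+
+# Usage note (illustrative): running ``MATHLIB=m python setup.py build``
+# makes ['m'] the first candidate the loop above tries.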
+
+def visibility_define(config):
+ """Return the define value to use for NPY_VISIBILITY_HIDDEN (may be empty
+ string)."""
+ hide = '__attribute__((visibility("hidden")))'
+ if config.check_gcc_function_attribute(hide, 'hideme'):
+ return hide
+ else:
+ return ''
+
+def configuration(parent_package='',top_path=None):
+ from numpy.distutils.misc_util import Configuration, dot_join
+ from numpy.distutils.system_info import get_info
+
+ config = Configuration('core', parent_package, top_path)
+ local_dir = config.local_path
+ codegen_dir = join(local_dir, 'code_generators')
+
+ if is_released(config):
+ warnings.simplefilter('error', MismatchCAPIWarning)
+
+ # Check whether we have a mismatch between the set C API VERSION and the
+ # actual C API VERSION
+ check_api_version(C_API_VERSION, codegen_dir)
+
+ generate_umath_py = join(codegen_dir, 'generate_umath.py')
+ n = dot_join(config.name, 'generate_umath')
+ generate_umath = npy_load_module('_'.join(n.split('.')),
+ generate_umath_py, ('.py', 'U', 1))
+
+ header_dir = 'include/numpy' # this is relative to config.path_in_package
+
+ cocache = CallOnceOnly()
+
+ def generate_config_h(ext, build_dir):
+ target = join(build_dir, header_dir, 'config.h')
+ d = os.path.dirname(target)
+ if not os.path.exists(d):
+ os.makedirs(d)
+
+ if newer(__file__, target):
+ config_cmd = config.get_config_cmd()
+ log.info('Generating %s', target)
+
+ # Check sizeof
+ moredefs, ignored = cocache.check_types(config_cmd, ext, build_dir)
+
+ # Check math library and C99 math funcs availability
+ mathlibs = check_mathlib(config_cmd)
+ moredefs.append(('MATHLIB', ','.join(mathlibs)))
+
+ check_math_capabilities(config_cmd, moredefs, mathlibs)
+ moredefs.extend(cocache.check_ieee_macros(config_cmd)[0])
+ moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[0])
+
+ # Signal check
+ if is_npy_no_signal():
+ moredefs.append('__NPY_PRIVATE_NO_SIGNAL')
+
+ # Windows checks
+ if sys.platform == 'win32' or os.name == 'nt':
+ win32_checks(moredefs)
+
+ # C99 restrict keyword
+ moredefs.append(('NPY_RESTRICT', config_cmd.check_restrict()))
+
+ # Inline check
+ inline = config_cmd.check_inline()
+
+ # Use relaxed stride checking
+ if NPY_RELAXED_STRIDES_CHECKING:
+ moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))
+
+ # Use bogus stride debug aid when relaxed strides are enabled
+ if NPY_RELAXED_STRIDES_DEBUG:
+ moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 1))
+
+ # Get long double representation
+ rep = check_long_double_representation(config_cmd)
+ moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1))
+
+ # Py3K check
+ if sys.version_info[0] == 3:
+ moredefs.append(('NPY_PY3K', 1))
+
+ # Generate the config.h file from moredefs
+ target_f = open(target, 'w')
+ for d in moredefs:
+ if isinstance(d, str):
+ target_f.write('#define %s\n' % (d))
+ else:
+ target_f.write('#define %s %s\n' % (d[0], d[1]))
+
+ # define inline to our keyword, or nothing
+ target_f.write('#ifndef __cplusplus\n')
+ if inline == 'inline':
+ target_f.write('/* #undef inline */\n')
+ else:
+ target_f.write('#define inline %s\n' % inline)
+ target_f.write('#endif\n')
+
+ # add the guard to make sure config.h is never included directly,
+ # but always through npy_config.h
+ target_f.write("""
+#ifndef _NPY_NPY_CONFIG_H_
+#error config.h should never be included directly, include npy_config.h instead
+#endif
+""")
+
+ target_f.close()
+ print('File:', target)
+ target_f = open(target)
+ print(target_f.read())
+ target_f.close()
+ print('EOF')
+ else:
+ mathlibs = []
+ target_f = open(target)
+ for line in target_f:
+ s = '#define MATHLIB'
+ if line.startswith(s):
+ value = line[len(s):].strip()
+ if value:
+ mathlibs.extend(value.split(','))
+ target_f.close()
+
+ # Ugly: this can be called within a library and not an extension,
+        # in which case there is no libraries attribute (and none is
+ # needed).
+ if hasattr(ext, 'libraries'):
+ ext.libraries.extend(mathlibs)
+
+ incl_dir = os.path.dirname(target)
+ if incl_dir not in config.numpy_include_dirs:
+ config.numpy_include_dirs.append(incl_dir)
+
+ return target
+
+ def generate_numpyconfig_h(ext, build_dir):
+ """Depends on config.h: generate_config_h has to be called before !"""
+ # put common include directory in build_dir on search path
+        # allows using code generation in headers
+ config.add_include_dirs(join(build_dir, "src", "common"))
+ config.add_include_dirs(join(build_dir, "src", "npymath"))
+
+ target = join(build_dir, header_dir, '_numpyconfig.h')
+ d = os.path.dirname(target)
+ if not os.path.exists(d):
+ os.makedirs(d)
+ if newer(__file__, target):
+ config_cmd = config.get_config_cmd()
+ log.info('Generating %s', target)
+
+ # Check sizeof
+ ignored, moredefs = cocache.check_types(config_cmd, ext, build_dir)
+
+ if is_npy_no_signal():
+ moredefs.append(('NPY_NO_SIGNAL', 1))
+
+ if is_npy_no_smp():
+ moredefs.append(('NPY_NO_SMP', 1))
+ else:
+ moredefs.append(('NPY_NO_SMP', 0))
+
+ mathlibs = check_mathlib(config_cmd)
+ moredefs.extend(cocache.check_ieee_macros(config_cmd)[1])
+ moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[1])
+
+ if NPY_RELAXED_STRIDES_CHECKING:
+ moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))
+
+ if NPY_RELAXED_STRIDES_DEBUG:
+ moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 1))
+
+ # Check whether we can use inttypes (C99) formats
+ if config_cmd.check_decl('PRIdPTR', headers=['inttypes.h']):
+ moredefs.append(('NPY_USE_C99_FORMATS', 1))
+
+ # visibility check
+ hidden_visibility = visibility_define(config_cmd)
+ moredefs.append(('NPY_VISIBILITY_HIDDEN', hidden_visibility))
+
+ # Add the C API/ABI versions
+ moredefs.append(('NPY_ABI_VERSION', '0x%.8X' % C_ABI_VERSION))
+ moredefs.append(('NPY_API_VERSION', '0x%.8X' % C_API_VERSION))
+
+ # Add moredefs to header
+ target_f = open(target, 'w')
+ for d in moredefs:
+ if isinstance(d, str):
+ target_f.write('#define %s\n' % (d))
+ else:
+ target_f.write('#define %s %s\n' % (d[0], d[1]))
+
+ # Define __STDC_FORMAT_MACROS
+ target_f.write("""
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS 1
+#endif
+""")
+ target_f.close()
+
+ # Dump the numpyconfig.h header to stdout
+ print('File: %s' % target)
+ target_f = open(target)
+ print(target_f.read())
+ target_f.close()
+ print('EOF')
+ config.add_data_files((header_dir, target))
+ return target
+
+ def generate_api_func(module_name):
+ def generate_api(ext, build_dir):
+ script = join(codegen_dir, module_name + '.py')
+ sys.path.insert(0, codegen_dir)
+ try:
+ m = __import__(module_name)
+ log.info('executing %s', script)
+ h_file, c_file, doc_file = m.generate_api(os.path.join(build_dir, header_dir))
+ finally:
+ del sys.path[0]
+ config.add_data_files((header_dir, h_file),
+ (header_dir, doc_file))
+ return (h_file,)
+ return generate_api
+
+ generate_numpy_api = generate_api_func('generate_numpy_api')
+ generate_ufunc_api = generate_api_func('generate_ufunc_api')
+
+ config.add_include_dirs(join(local_dir, "src", "common"))
+ config.add_include_dirs(join(local_dir, "src"))
+ config.add_include_dirs(join(local_dir))
+
+ config.add_data_files('include/numpy/*.h')
+ config.add_include_dirs(join('src', 'npymath'))
+ config.add_include_dirs(join('src', 'multiarray'))
+ config.add_include_dirs(join('src', 'umath'))
+ config.add_include_dirs(join('src', 'npysort'))
+
+ config.add_define_macros([("NPY_INTERNAL_BUILD", "1")]) # this macro indicates that Numpy build is in process
+ config.add_define_macros([("HAVE_NPY_CONFIG_H", "1")])
+ if sys.platform[:3] == "aix":
+ config.add_define_macros([("_LARGE_FILES", None)])
+ else:
+ config.add_define_macros([("_FILE_OFFSET_BITS", "64")])
+ config.add_define_macros([('_LARGEFILE_SOURCE', '1')])
+ config.add_define_macros([('_LARGEFILE64_SOURCE', '1')])
+
+ config.numpy_include_dirs.extend(config.paths('include'))
+
+ deps = [join('src', 'npymath', '_signbit.c'),
+ join('include', 'numpy', '*object.h'),
+ join(codegen_dir, 'genapi.py'),
+ ]
+
+ #######################################################################
+ # dummy module #
+ #######################################################################
+
+ # npymath needs the config.h and numpyconfig.h files to be generated, but
+ # build_clib cannot handle generate_config_h and generate_numpyconfig_h
+ # (don't ask). Because clib are generated before extensions, we have to
+ # explicitly add an extension which has generate_config_h and
+ # generate_numpyconfig_h as sources *before* adding npymath.
+
+ config.add_extension('_dummy',
+ sources=[join('src', 'dummymodule.c'),
+ generate_config_h,
+ generate_numpyconfig_h,
+ generate_numpy_api]
+ )
+
+ #######################################################################
+ # npymath library #
+ #######################################################################
+
+ subst_dict = dict([("sep", os.path.sep), ("pkgname", "numpy.core")])
+
+ def get_mathlib_info(*args):
+ # Another ugly hack: the mathlib info is known once build_src is run,
+ # but we cannot use add_installed_pkg_config here either, so we only
+ # update the substitution dictionary during npymath build
+ config_cmd = config.get_config_cmd()
+
+ # Check that the toolchain works, to fail early if it doesn't
+ # (avoid late errors with MATHLIB which are confusing if the
+ # compiler does not work).
+ st = config_cmd.try_link('int main(void) { return 0;}')
+ if not st:
+ raise RuntimeError("Broken toolchain: cannot link a simple C program")
+ mlibs = check_mathlib(config_cmd)
+
+ posix_mlib = ' '.join(['-l%s' % l for l in mlibs])
+ msvc_mlib = ' '.join(['%s.lib' % l for l in mlibs])
+ subst_dict["posix_mathlib"] = posix_mlib
+ subst_dict["msvc_mathlib"] = msvc_mlib
+
+ npymath_sources = [join('src', 'npymath', 'npy_math_internal.h.src'),
+ join('src', 'npymath', 'npy_math.c'),
+ join('src', 'npymath', 'ieee754.c.src'),
+ join('src', 'npymath', 'npy_math_complex.c.src'),
+ join('src', 'npymath', 'halffloat.c')
+ ]
+
+ # Must be true for CRT compilers but not MinGW/cygwin. See gh-9977.
+ is_msvc = platform.system() == 'Windows'
+ config.add_installed_library('npymath',
+ sources=npymath_sources + [get_mathlib_info],
+ install_dir='lib',
+ build_info={
+ 'include_dirs' : [], # empty list required for creating npy_math_internal.h
+ 'extra_compiler_args' : (['/GL-'] if is_msvc else []),
+ })
+ config.add_npy_pkg_config("npymath.ini.in", "lib/npy-pkg-config",
+ subst_dict)
+ config.add_npy_pkg_config("mlib.ini.in", "lib/npy-pkg-config",
+ subst_dict)
+
+ #######################################################################
+ # npysort library #
+ #######################################################################
+
+ # This library is created for the build but it is not installed
+ npysort_sources = [join('src', 'common', 'npy_sort.h.src'),
+ join('src', 'npysort', 'quicksort.c.src'),
+ join('src', 'npysort', 'mergesort.c.src'),
+ join('src', 'npysort', 'heapsort.c.src'),
+ join('src', 'common', 'npy_partition.h.src'),
+ join('src', 'npysort', 'selection.c.src'),
+ join('src', 'common', 'npy_binsearch.h.src'),
+ join('src', 'npysort', 'binsearch.c.src'),
+ ]
+ config.add_library('npysort',
+ sources=npysort_sources,
+ include_dirs=[])
+
+ #######################################################################
+ # multiarray_tests module #
+ #######################################################################
+
+ config.add_extension('_multiarray_tests',
+ sources=[join('src', 'multiarray', '_multiarray_tests.c.src'),
+ join('src', 'common', 'mem_overlap.c')],
+ depends=[join('src', 'common', 'mem_overlap.h'),
+ join('src', 'common', 'npy_extint128.h')],
+ libraries=['npymath'])
+
+ #######################################################################
+ # _multiarray_umath module - common part #
+ #######################################################################
+
+ common_deps = [
+ join('src', 'common', 'array_assign.h'),
+ join('src', 'common', 'binop_override.h'),
+ join('src', 'common', 'cblasfuncs.h'),
+ join('src', 'common', 'lowlevel_strided_loops.h'),
+ join('src', 'common', 'mem_overlap.h'),
+ join('src', 'common', 'npy_cblas.h'),
+ join('src', 'common', 'npy_config.h'),
+ join('src', 'common', 'npy_ctypes.h'),
+ join('src', 'common', 'npy_extint128.h'),
+ join('src', 'common', 'npy_import.h'),
+ join('src', 'common', 'npy_longdouble.h'),
+ join('src', 'common', 'templ_common.h.src'),
+ join('src', 'common', 'ucsnarrow.h'),
+ join('src', 'common', 'ufunc_override.h'),
+ join('src', 'common', 'umathmodule.h'),
+ join('src', 'common', 'numpyos.h'),
+ ]
+
+ common_src = [
+ join('src', 'common', 'array_assign.c'),
+ join('src', 'common', 'mem_overlap.c'),
+ join('src', 'common', 'npy_longdouble.c'),
+ join('src', 'common', 'templ_common.h.src'),
+ join('src', 'common', 'ucsnarrow.c'),
+ join('src', 'common', 'ufunc_override.c'),
+ join('src', 'common', 'numpyos.c'),
+ ]
+
+ blas_info = get_info('blas_opt', 0)
+ if blas_info and ('HAVE_CBLAS', None) in blas_info.get('define_macros', []):
+ extra_info = blas_info
+ # These files are also in MANIFEST.in so that they are always in
+ # the source distribution independently of HAVE_CBLAS.
+ common_src.extend([join('src', 'common', 'cblasfuncs.c'),
+ join('src', 'common', 'python_xerbla.c'),
+ ])
+ if uses_accelerate_framework(blas_info):
+ common_src.extend(get_sgemv_fix())
+ else:
+ extra_info = {}
+
+ #######################################################################
+ # _multiarray_umath module - multiarray part #
+ #######################################################################
+
+ multiarray_deps = [
+ join('src', 'multiarray', 'arrayobject.h'),
+ join('src', 'multiarray', 'arraytypes.h'),
+ join('src', 'multiarray', 'arrayfunction_override.h'),
+ join('src', 'multiarray', 'buffer.h'),
+ join('src', 'multiarray', 'calculation.h'),
+ join('src', 'multiarray', 'common.h'),
+ join('src', 'multiarray', 'convert_datatype.h'),
+ join('src', 'multiarray', 'convert.h'),
+ join('src', 'multiarray', 'conversion_utils.h'),
+ join('src', 'multiarray', 'ctors.h'),
+ join('src', 'multiarray', 'descriptor.h'),
+ join('src', 'multiarray', 'dragon4.h'),
+ join('src', 'multiarray', 'getset.h'),
+ join('src', 'multiarray', 'hashdescr.h'),
+ join('src', 'multiarray', 'iterators.h'),
+ join('src', 'multiarray', 'mapping.h'),
+ join('src', 'multiarray', 'methods.h'),
+ join('src', 'multiarray', 'multiarraymodule.h'),
+ join('src', 'multiarray', 'nditer_impl.h'),
+ join('src', 'multiarray', 'number.h'),
+ join('src', 'multiarray', 'refcount.h'),
+ join('src', 'multiarray', 'scalartypes.h'),
+ join('src', 'multiarray', 'sequence.h'),
+ join('src', 'multiarray', 'shape.h'),
+ join('src', 'multiarray', 'strfuncs.h'),
+ join('src', 'multiarray', 'typeinfo.h'),
+ join('src', 'multiarray', 'usertypes.h'),
+ join('src', 'multiarray', 'vdot.h'),
+ join('include', 'numpy', 'arrayobject.h'),
+ join('include', 'numpy', '_neighborhood_iterator_imp.h'),
+ join('include', 'numpy', 'npy_endian.h'),
+ join('include', 'numpy', 'arrayscalars.h'),
+ join('include', 'numpy', 'noprefix.h'),
+ join('include', 'numpy', 'npy_interrupt.h'),
+ join('include', 'numpy', 'npy_3kcompat.h'),
+ join('include', 'numpy', 'npy_math.h'),
+ join('include', 'numpy', 'halffloat.h'),
+ join('include', 'numpy', 'npy_common.h'),
+ join('include', 'numpy', 'npy_os.h'),
+ join('include', 'numpy', 'utils.h'),
+ join('include', 'numpy', 'ndarrayobject.h'),
+ join('include', 'numpy', 'npy_cpu.h'),
+ join('include', 'numpy', 'numpyconfig.h'),
+ join('include', 'numpy', 'ndarraytypes.h'),
+ join('include', 'numpy', 'npy_1_7_deprecated_api.h'),
+        # add library sources, as distutils does not consider library
+        # dependencies
+ ] + npysort_sources + npymath_sources
+
+ multiarray_src = [
+ join('src', 'multiarray', 'alloc.c'),
+ join('src', 'multiarray', 'arrayobject.c'),
+ join('src', 'multiarray', 'arraytypes.c.src'),
+ join('src', 'multiarray', 'array_assign_scalar.c'),
+ join('src', 'multiarray', 'array_assign_array.c'),
+ join('src', 'multiarray', 'arrayfunction_override.c'),
+ join('src', 'multiarray', 'buffer.c'),
+ join('src', 'multiarray', 'calculation.c'),
+ join('src', 'multiarray', 'compiled_base.c'),
+ join('src', 'multiarray', 'common.c'),
+ join('src', 'multiarray', 'convert.c'),
+ join('src', 'multiarray', 'convert_datatype.c'),
+ join('src', 'multiarray', 'conversion_utils.c'),
+ join('src', 'multiarray', 'ctors.c'),
+ join('src', 'multiarray', 'datetime.c'),
+ join('src', 'multiarray', 'datetime_strings.c'),
+ join('src', 'multiarray', 'datetime_busday.c'),
+ join('src', 'multiarray', 'datetime_busdaycal.c'),
+ join('src', 'multiarray', 'descriptor.c'),
+ join('src', 'multiarray', 'dragon4.c'),
+ join('src', 'multiarray', 'dtype_transfer.c'),
+ join('src', 'multiarray', 'einsum.c.src'),
+ join('src', 'multiarray', 'flagsobject.c'),
+ join('src', 'multiarray', 'getset.c'),
+ join('src', 'multiarray', 'hashdescr.c'),
+ join('src', 'multiarray', 'item_selection.c'),
+ join('src', 'multiarray', 'iterators.c'),
+ join('src', 'multiarray', 'lowlevel_strided_loops.c.src'),
+ join('src', 'multiarray', 'mapping.c'),
+ join('src', 'multiarray', 'methods.c'),
+ join('src', 'multiarray', 'multiarraymodule.c'),
+ join('src', 'multiarray', 'nditer_templ.c.src'),
+ join('src', 'multiarray', 'nditer_api.c'),
+ join('src', 'multiarray', 'nditer_constr.c'),
+ join('src', 'multiarray', 'nditer_pywrap.c'),
+ join('src', 'multiarray', 'number.c'),
+ join('src', 'multiarray', 'refcount.c'),
+ join('src', 'multiarray', 'sequence.c'),
+ join('src', 'multiarray', 'shape.c'),
+ join('src', 'multiarray', 'scalarapi.c'),
+ join('src', 'multiarray', 'scalartypes.c.src'),
+ join('src', 'multiarray', 'strfuncs.c'),
+ join('src', 'multiarray', 'temp_elide.c'),
+ join('src', 'multiarray', 'typeinfo.c'),
+ join('src', 'multiarray', 'usertypes.c'),
+ join('src', 'multiarray', 'vdot.c'),
+ ]
+
+ #######################################################################
+ # _multiarray_umath module - umath part #
+ #######################################################################
+
+ def generate_umath_c(ext, build_dir):
+ target = join(build_dir, header_dir, '__umath_generated.c')
+ dir = os.path.dirname(target)
+ if not os.path.exists(dir):
+ os.makedirs(dir)
+ script = generate_umath_py
+ if newer(script, target):
+ f = open(target, 'w')
+ f.write(generate_umath.make_code(generate_umath.defdict,
+ generate_umath.__file__))
+ f.close()
+ return []
+
+ umath_src = [
+ join('src', 'umath', 'umathmodule.c'),
+ join('src', 'umath', 'reduction.c'),
+ join('src', 'umath', 'funcs.inc.src'),
+ join('src', 'umath', 'simd.inc.src'),
+ join('src', 'umath', 'loops.h.src'),
+ join('src', 'umath', 'loops.c.src'),
+ join('src', 'umath', 'matmul.h.src'),
+ join('src', 'umath', 'matmul.c.src'),
+ join('src', 'umath', 'ufunc_object.c'),
+ join('src', 'umath', 'extobj.c'),
+ join('src', 'umath', 'cpuid.c'),
+ join('src', 'umath', 'scalarmath.c.src'),
+ join('src', 'umath', 'ufunc_type_resolution.c'),
+ join('src', 'umath', 'override.c'),
+ ]
+
+ umath_deps = [
+ generate_umath_py,
+ join('include', 'numpy', 'npy_math.h'),
+ join('include', 'numpy', 'halffloat.h'),
+ join('src', 'multiarray', 'common.h'),
+ join('src', 'multiarray', 'number.h'),
+ join('src', 'common', 'templ_common.h.src'),
+ join('src', 'umath', 'simd.inc.src'),
+ join('src', 'umath', 'override.h'),
+ join(codegen_dir, 'generate_ufunc_api.py'),
+ ]
+
+ config.add_extension('_multiarray_umath',
+ sources=multiarray_src + umath_src +
+ npymath_sources + common_src +
+ [generate_config_h,
+ generate_numpyconfig_h,
+ generate_numpy_api,
+ join(codegen_dir, 'generate_numpy_api.py'),
+ join('*.py'),
+ generate_umath_c,
+ generate_ufunc_api,
+ ],
+ depends=deps + multiarray_deps + umath_deps +
+ common_deps,
+ libraries=['npymath', 'npysort'],
+ extra_info=extra_info)
+
+ #######################################################################
+ # umath_tests module #
+ #######################################################################
+
+ config.add_extension('_umath_tests',
+ sources=[join('src', 'umath', '_umath_tests.c.src')])
+
+ #######################################################################
+ # custom rational dtype module #
+ #######################################################################
+
+ config.add_extension('_rational_tests',
+ sources=[join('src', 'umath', '_rational_tests.c.src')])
+
+ #######################################################################
+ # struct_ufunc_test module #
+ #######################################################################
+
+ config.add_extension('_struct_ufunc_tests',
+ sources=[join('src', 'umath', '_struct_ufunc_tests.c.src')])
+
+
+ #######################################################################
+ # operand_flag_tests module #
+ #######################################################################
+
+ config.add_extension('_operand_flag_tests',
+ sources=[join('src', 'umath', '_operand_flag_tests.c.src')])
+
+ config.add_data_dir('tests')
+ config.add_data_dir('tests/data')
+
+ config.make_svn_version_py()
+
+ return config
+
+if __name__ == '__main__':
+ from numpy.distutils.core import setup
+ setup(configuration=configuration)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/setup.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/setup.pyc
new file mode 100644
index 0000000..2466efd
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/setup.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/setup_common.py b/project/venv/lib/python2.7/site-packages/numpy/core/setup_common.py
new file mode 100644
index 0000000..f837df1
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/setup_common.py
@@ -0,0 +1,401 @@
+from __future__ import division, absolute_import, print_function
+
+# Code common to build tools
+import sys
+import warnings
+import copy
+import binascii
+
+from numpy.distutils.misc_util import mingw32
+
+
+#-------------------
+# Versioning support
+#-------------------
+# How to change C_API_VERSION ?
+# - increase C_API_VERSION value
+# - record the hash for the new C API with the script cversions.py
+# and add the hash to cversions.txt
+# The hash values are used to remind developers when the C API number was not
+# updated - a mismatch generates a MismatchCAPIWarning, which is turned into
+# an exception for released versions.
+
+# Binary compatibility version number. This number is increased whenever the
+# C-API is changed such that binary compatibility is broken, i.e. whenever a
+# recompile of extension modules is needed.
+C_ABI_VERSION = 0x01000009
+
+# Minor API version. This number is increased whenever a change is made to the
+# C-API -- whether it breaks binary compatibility or not. Some changes, such
+# as adding a function pointer to the end of the function table, can be made
+# without breaking binary compatibility. In this case, only the C_API_VERSION
+# (*not* C_ABI_VERSION) would be increased. Whenever binary compatibility is
+# broken, both C_API_VERSION and C_ABI_VERSION should be increased.
+#
+# 0x00000008 - 1.7.x
+# 0x00000009 - 1.8.x
+# 0x00000009 - 1.9.x
+# 0x0000000a - 1.10.x
+# 0x0000000a - 1.11.x
+# 0x0000000a - 1.12.x
+# 0x0000000b - 1.13.x
+# 0x0000000c - 1.14.x
+# 0x0000000c - 1.15.x
+# 0x0000000d - 1.16.x
+C_API_VERSION = 0x0000000d
+
+class MismatchCAPIWarning(Warning):
+ pass
+
+def is_released(config):
+ """Return True if a released version of numpy is detected."""
+ from distutils.version import LooseVersion
+
+ v = config.get_version('../version.py')
+ if v is None:
+ raise ValueError("Could not get version")
+ pv = LooseVersion(vstring=v).version
+ if len(pv) > 3:
+ return False
+ return True
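+
+# e.g. LooseVersion('1.16.2').version == [1, 16, 2] (a release), while a dev
+# build such as '1.17.0.dev0' parses to [1, 17, 0, 'dev', 0] and is not.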
+
+def get_api_versions(apiversion, codegen_dir):
+ """
+ Return current C API checksum and the recorded checksum.
+
+    Return the current C API checksum and the recorded checksum for the given
+    C API version.
+
+ """
+ # Compute the hash of the current API as defined in the .txt files in
+ # code_generators
+ sys.path.insert(0, codegen_dir)
+ try:
+ m = __import__('genapi')
+ numpy_api = __import__('numpy_api')
+ curapi_hash = m.fullapi_hash(numpy_api.full_api)
+ apis_hash = m.get_versions_hash()
+ finally:
+ del sys.path[0]
+
+ return curapi_hash, apis_hash[apiversion]
+
+def check_api_version(apiversion, codegen_dir):
+ """Emits a MismacthCAPIWarning if the C API version needs updating."""
+ curapi_hash, api_hash = get_api_versions(apiversion, codegen_dir)
+
+ # If different hash, it means that the api .txt files in
+ # codegen_dir have been updated without the API version being
+ # updated. Any modification in those .txt files should be reflected
+ # in the api and eventually abi versions.
+ # To compute the checksum of the current API, use
+ # code_generators/cversions.py script
+ if not curapi_hash == api_hash:
+ msg = ("API mismatch detected, the C API version "
+ "numbers have to be updated. Current C api version is %d, "
+ "with checksum %s, but recorded checksum for C API version %d in "
+ "codegen_dir/cversions.txt is %s. If functions were added in the "
+ "C API, you have to update C_API_VERSION in %s."
+ )
+ warnings.warn(msg % (apiversion, curapi_hash, apiversion, api_hash,
+ __file__),
+ MismatchCAPIWarning, stacklevel=2)
+# Mandatory functions: if not found, fail the build
+MANDATORY_FUNCS = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs",
+ "floor", "ceil", "sqrt", "log10", "log", "exp", "asin",
+ "acos", "atan", "fmod", 'modf', 'frexp', 'ldexp']
+
+# Standard functions which may not be available and for which we have a
+# replacement implementation. Note that some of these are C99 functions.
+OPTIONAL_STDFUNCS = ["expm1", "log1p", "acosh", "asinh", "atanh",
+ "rint", "trunc", "exp2", "log2", "hypot", "atan2", "pow",
+ "copysign", "nextafter", "ftello", "fseeko",
+ "strtoll", "strtoull", "cbrt", "strtold_l", "fallocate",
+ "backtrace", "madvise"]
+
+
+OPTIONAL_HEADERS = [
+# sse headers only enabled automatically on amd64/x32 builds
+ "xmmintrin.h", # SSE
+ "emmintrin.h", # SSE2
+ "features.h", # for glibc version linux
+ "xlocale.h", # see GH#8367
+ "dlfcn.h", # dladdr
+ "sys/mman.h", #madvise
+]
+
+# optional gcc compiler builtins and their call arguments, and optionally a
+# required header and definition name (HAVE_ prepended)
+# call arguments are required as the compiler will do strict signature checking
+OPTIONAL_INTRINSICS = [("__builtin_isnan", '5.'),
+ ("__builtin_isinf", '5.'),
+ ("__builtin_isfinite", '5.'),
+ ("__builtin_bswap32", '5u'),
+ ("__builtin_bswap64", '5u'),
+ ("__builtin_expect", '5, 0'),
+ ("__builtin_mul_overflow", '5, 5, (int*)5'),
+                       # broken on OSX 10.11, make sure it's not optimized away
+ ("volatile int r = __builtin_cpu_supports", '"sse"',
+ "stdio.h", "__BUILTIN_CPU_SUPPORTS"),
+ # MMX only needed for icc, but some clangs don't have it
+ ("_m_from_int64", '0', "emmintrin.h"),
+ ("_mm_load_ps", '(float*)0', "xmmintrin.h"), # SSE
+ ("_mm_prefetch", '(float*)0, _MM_HINT_NTA',
+ "xmmintrin.h"), # SSE
+ ("_mm_load_pd", '(double*)0', "emmintrin.h"), # SSE2
+ ("__builtin_prefetch", "(float*)0, 0, 3"),
+ # check that the linker can handle avx
+ ("__asm__ volatile", '"vpand %xmm1, %xmm2, %xmm3"',
+ "stdio.h", "LINK_AVX"),
+ ("__asm__ volatile", '"vpand %ymm1, %ymm2, %ymm3"',
+ "stdio.h", "LINK_AVX2"),
+ ("__asm__ volatile", '"xgetbv"', "stdio.h", "XGETBV"),
+ ]
+
+# function attributes
+# tested via "int %s %s(void *);" % (attribute, name)
+# function name will be converted to HAVE_ preprocessor macro
+OPTIONAL_FUNCTION_ATTRIBUTES = [('__attribute__((optimize("unroll-loops")))',
+ 'attribute_optimize_unroll_loops'),
+ ('__attribute__((optimize("O3")))',
+ 'attribute_optimize_opt_3'),
+ ('__attribute__((nonnull (1)))',
+ 'attribute_nonnull'),
+ ('__attribute__((target ("avx")))',
+ 'attribute_target_avx'),
+ ('__attribute__((target ("avx2")))',
+ 'attribute_target_avx2'),
+ ]
+
+# variable attributes tested via "int %s a" % attribute
+OPTIONAL_VARIABLE_ATTRIBUTES = ["__thread", "__declspec(thread)"]
+
+# Subset of OPTIONAL_STDFUNCS which may already have HAVE_* defined by Python.h
+OPTIONAL_STDFUNCS_MAYBE = [
+ "expm1", "log1p", "acosh", "atanh", "asinh", "hypot", "copysign",
+ "ftello", "fseeko"
+ ]
+
+# C99 functions: float and long double versions
+C99_FUNCS = [
+ "sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", "floor", "ceil",
+ "rint", "trunc", "sqrt", "log10", "log", "log1p", "exp", "expm1",
+ "asin", "acos", "atan", "asinh", "acosh", "atanh", "hypot", "atan2",
+ "pow", "fmod", "modf", 'frexp', 'ldexp', "exp2", "log2", "copysign",
+ "nextafter", "cbrt"
+ ]
+C99_FUNCS_SINGLE = [f + 'f' for f in C99_FUNCS]
+C99_FUNCS_EXTENDED = [f + 'l' for f in C99_FUNCS]
+C99_COMPLEX_TYPES = [
+ 'complex double', 'complex float', 'complex long double'
+ ]
+C99_COMPLEX_FUNCS = [
+ "cabs", "cacos", "cacosh", "carg", "casin", "casinh", "catan",
+ "catanh", "ccos", "ccosh", "cexp", "cimag", "clog", "conj", "cpow",
+ "cproj", "creal", "csin", "csinh", "csqrt", "ctan", "ctanh"
+ ]
+
+def fname2def(name):
+ return "HAVE_%s" % name.upper()
+
+def sym2def(symbol):
+ define = symbol.replace(' ', '')
+ return define.upper()
+
+def type2def(symbol):
+ define = symbol.replace(' ', '_')
+ return define.upper()
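+
+# For instance (illustrative): fname2def('expm1') -> 'HAVE_EXPM1',
+# sym2def('long double') -> 'LONGDOUBLE', and
+# type2def('complex float') -> 'COMPLEX_FLOAT'.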
+
+# Code to detect long double representation taken from MPFR m4 macro
+def check_long_double_representation(cmd):
+ cmd._check_compiler()
+ body = LONG_DOUBLE_REPRESENTATION_SRC % {'type': 'long double'}
+
+ # Disable whole program optimization (the default on vs2015, with python 3.5+)
+ # which generates intermediary object files and prevents checking the
+ # float representation.
+ if sys.platform == "win32" and not mingw32():
+ try:
+ cmd.compiler.compile_options.remove("/GL")
+ except (AttributeError, ValueError):
+ pass
+
+ # Disable multi-file interprocedural optimization in the Intel compiler on Linux
+ # which generates intermediary object files and prevents checking the
+ # float representation.
+ elif (sys.platform != "win32"
+ and cmd.compiler.compiler_type.startswith('intel')
+ and '-ipo' in cmd.compiler.cc_exe):
+ newcompiler = cmd.compiler.cc_exe.replace(' -ipo', '')
+ cmd.compiler.set_executables(
+ compiler=newcompiler,
+ compiler_so=newcompiler,
+ compiler_cxx=newcompiler,
+ linker_exe=newcompiler,
+ linker_so=newcompiler + ' -shared'
+ )
+
+ # We need to use _compile because we need the object filename
+ src, obj = cmd._compile(body, None, None, 'c')
+ try:
+ ltype = long_double_representation(pyod(obj))
+ return ltype
+ except ValueError:
+ # try linking to support CC="gcc -flto" or icc -ipo
+ # struct needs to be volatile so it isn't optimized away
+ body = body.replace('struct', 'volatile struct')
+ body += "int main(void) { return 0; }\n"
+ src, obj = cmd._compile(body, None, None, 'c')
+ cmd.temp_files.append("_configtest")
+ cmd.compiler.link_executable([obj], "_configtest")
+ ltype = long_double_representation(pyod("_configtest"))
+ return ltype
+ finally:
+ cmd._clean()
+
+LONG_DOUBLE_REPRESENTATION_SRC = r"""
+/* "before" is 16 bytes to ensure there's no padding between it and "x".
+ * We're not expecting any "long double" bigger than 16 bytes or with
+ * alignment requirements stricter than 16 bytes. */
+typedef %(type)s test_type;
+
+struct {
+ char before[16];
+ test_type x;
+ char after[8];
+} foo = {
+ { '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+ '\001', '\043', '\105', '\147', '\211', '\253', '\315', '\357' },
+ -123456789.0,
+ { '\376', '\334', '\272', '\230', '\166', '\124', '\062', '\020' }
+};
+"""
+
+def pyod(filename):
+ """Python implementation of the od UNIX utility (od -b, more exactly).
+
+ Parameters
+ ----------
+ filename : str
+ name of the file to get the dump from.
+
+ Returns
+ -------
+ out : seq
+ list of lines of od output
+
+ Note
+ ----
+    We only implement enough to get the necessary information for long double
+    representation; this is not intended as a compatible replacement for od.
+ """
+ def _pyod2():
+ out = []
+
+ fid = open(filename, 'rb')
+ try:
+ yo = [int(oct(int(binascii.b2a_hex(o), 16))) for o in fid.read()]
+ for i in range(0, len(yo), 16):
+ line = ['%07d' % int(oct(i))]
+ line.extend(['%03d' % c for c in yo[i:i+16]])
+ out.append(" ".join(line))
+ return out
+ finally:
+ fid.close()
+
+ def _pyod3():
+ out = []
+
+ fid = open(filename, 'rb')
+ try:
+ yo2 = [oct(o)[2:] for o in fid.read()]
+ for i in range(0, len(yo2), 16):
+ line = ['%07d' % int(oct(i)[2:])]
+ line.extend(['%03d' % int(c) for c in yo2[i:i+16]])
+ out.append(" ".join(line))
+ return out
+ finally:
+ fid.close()
+
+ if sys.version_info[0] < 3:
+ return _pyod2()
+ else:
+ return _pyod3()
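+
+# A sketch of the dump format the tables below are matched against
+# (octal bytes, 16 per line, preceded by an octal offset; illustrative):
+# 0000000 000 000 000 000 000 000 000 000 001 043 105 147 211 253 315 357
+# 0000020 ...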
+
+_BEFORE_SEQ = ['000', '000', '000', '000', '000', '000', '000', '000',
+ '001', '043', '105', '147', '211', '253', '315', '357']
+_AFTER_SEQ = ['376', '334', '272', '230', '166', '124', '062', '020']
+
+_IEEE_DOUBLE_BE = ['301', '235', '157', '064', '124', '000', '000', '000']
+_IEEE_DOUBLE_LE = _IEEE_DOUBLE_BE[::-1]
+_INTEL_EXTENDED_12B = ['000', '000', '000', '000', '240', '242', '171', '353',
+ '031', '300', '000', '000']
+_INTEL_EXTENDED_16B = ['000', '000', '000', '000', '240', '242', '171', '353',
+ '031', '300', '000', '000', '000', '000', '000', '000']
+_MOTOROLA_EXTENDED_12B = ['300', '031', '000', '000', '353', '171',
+ '242', '240', '000', '000', '000', '000']
+_IEEE_QUAD_PREC_BE = ['300', '031', '326', '363', '105', '100', '000', '000',
+ '000', '000', '000', '000', '000', '000', '000', '000']
+_IEEE_QUAD_PREC_LE = _IEEE_QUAD_PREC_BE[::-1]
+_IBM_DOUBLE_DOUBLE_BE = (['301', '235', '157', '064', '124', '000', '000', '000'] +
+ ['000'] * 8)
+_IBM_DOUBLE_DOUBLE_LE = (['000', '000', '000', '124', '064', '157', '235', '301'] +
+ ['000'] * 8)
+
+def long_double_representation(lines):
+ """Given a binary dump as given by GNU od -b, look for long double
+ representation."""
+
+ # Read contains a list of 32 items, each item is a byte (in octal
+ # representation, as a string). We 'slide' over the output until read is of
+ # the form before_seq + content + after_sequence, where content is the long double
+ # representation:
+ # - content is 12 bytes: 80 bits Intel representation
+ # - content is 16 bytes: 80 bits Intel representation (64 bits) or quad precision
+ # - content is 8 bytes: same as double (not implemented yet)
+ read = [''] * 32
+ saw = None
+ for line in lines:
+        # we skip the first word, as od -b outputs an index at the beginning of
+        # each line
+ for w in line.split()[1:]:
+ read.pop(0)
+ read.append(w)
+
+ # If the end of read is equal to the after_sequence, read contains
+ # the long double
+ if read[-8:] == _AFTER_SEQ:
+ saw = copy.copy(read)
+ # if the content was 12 bytes, we only have 32 - 8 - 12 = 12
+ # "before" bytes. In other words the first 4 "before" bytes went
+ # past the sliding window.
+ if read[:12] == _BEFORE_SEQ[4:]:
+ if read[12:-8] == _INTEL_EXTENDED_12B:
+ return 'INTEL_EXTENDED_12_BYTES_LE'
+ if read[12:-8] == _MOTOROLA_EXTENDED_12B:
+ return 'MOTOROLA_EXTENDED_12_BYTES_BE'
+ # if the content was 16 bytes, we are left with 32-8-16 = 16
+ # "before" bytes, so 8 went past the sliding window.
+ elif read[:8] == _BEFORE_SEQ[8:]:
+ if read[8:-8] == _INTEL_EXTENDED_16B:
+ return 'INTEL_EXTENDED_16_BYTES_LE'
+ elif read[8:-8] == _IEEE_QUAD_PREC_BE:
+ return 'IEEE_QUAD_BE'
+ elif read[8:-8] == _IEEE_QUAD_PREC_LE:
+ return 'IEEE_QUAD_LE'
+ elif read[8:-8] == _IBM_DOUBLE_DOUBLE_LE:
+ return 'IBM_DOUBLE_DOUBLE_LE'
+ elif read[8:-8] == _IBM_DOUBLE_DOUBLE_BE:
+ return 'IBM_DOUBLE_DOUBLE_BE'
+ # if the content was 8 bytes, left with 32-8-8 = 16 bytes
+ elif read[:16] == _BEFORE_SEQ:
+ if read[16:-8] == _IEEE_DOUBLE_LE:
+ return 'IEEE_DOUBLE_LE'
+ elif read[16:-8] == _IEEE_DOUBLE_BE:
+ return 'IEEE_DOUBLE_BE'
+
+ if saw is not None:
+ raise ValueError("Unrecognized format (%s)" % saw)
+ else:
+ # We never detected the after_sequence
+ raise ValueError("Could not lock sequences (%s)" % saw)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/setup_common.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/setup_common.pyc
new file mode 100644
index 0000000..3557770
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/setup_common.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/shape_base.py b/project/venv/lib/python2.7/site-packages/numpy/core/shape_base.py
new file mode 100644
index 0000000..d20afd8
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/shape_base.py
@@ -0,0 +1,888 @@
+from __future__ import division, absolute_import, print_function
+
+__all__ = ['atleast_1d', 'atleast_2d', 'atleast_3d', 'block', 'hstack',
+ 'stack', 'vstack']
+
+import functools
+import operator
+import types
+import warnings
+
+from . import numeric as _nx
+from . import overrides
+from .numeric import array, asanyarray, newaxis
+from .multiarray import normalize_axis_index
+
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
+def _atleast_1d_dispatcher(*arys):
+ return arys
+
+
+@array_function_dispatch(_atleast_1d_dispatcher)
+def atleast_1d(*arys):
+ """
+ Convert inputs to arrays with at least one dimension.
+
+ Scalar inputs are converted to 1-dimensional arrays, whilst
+ higher-dimensional inputs are preserved.
+
+ Parameters
+ ----------
+ arys1, arys2, ... : array_like
+ One or more input arrays.
+
+ Returns
+ -------
+ ret : ndarray
+ An array, or list of arrays, each with ``a.ndim >= 1``.
+ Copies are made only if necessary.
+
+ See Also
+ --------
+ atleast_2d, atleast_3d
+
+ Examples
+ --------
+ >>> np.atleast_1d(1.0)
+ array([ 1.])
+
+ >>> x = np.arange(9.0).reshape(3,3)
+ >>> np.atleast_1d(x)
+ array([[ 0., 1., 2.],
+ [ 3., 4., 5.],
+ [ 6., 7., 8.]])
+ >>> np.atleast_1d(x) is x
+ True
+
+ >>> np.atleast_1d(1, [3, 4])
+ [array([1]), array([3, 4])]
+
+ """
+ res = []
+ for ary in arys:
+ ary = asanyarray(ary)
+ if ary.ndim == 0:
+ result = ary.reshape(1)
+ else:
+ result = ary
+ res.append(result)
+ if len(res) == 1:
+ return res[0]
+ else:
+ return res
+
+
+def _atleast_2d_dispatcher(*arys):
+ return arys
+
+
+@array_function_dispatch(_atleast_2d_dispatcher)
+def atleast_2d(*arys):
+ """
+ View inputs as arrays with at least two dimensions.
+
+ Parameters
+ ----------
+ arys1, arys2, ... : array_like
+ One or more array-like sequences. Non-array inputs are converted
+ to arrays. Arrays that already have two or more dimensions are
+ preserved.
+
+ Returns
+ -------
+ res, res2, ... : ndarray
+ An array, or list of arrays, each with ``a.ndim >= 2``.
+ Copies are avoided where possible, and views with two or more
+ dimensions are returned.
+
+ See Also
+ --------
+ atleast_1d, atleast_3d
+
+ Examples
+ --------
+ >>> np.atleast_2d(3.0)
+ array([[ 3.]])
+
+ >>> x = np.arange(3.0)
+ >>> np.atleast_2d(x)
+ array([[ 0., 1., 2.]])
+ >>> np.atleast_2d(x).base is x
+ True
+
+ >>> np.atleast_2d(1, [1, 2], [[1, 2]])
+ [array([[1]]), array([[1, 2]]), array([[1, 2]])]
+
+ """
+ res = []
+ for ary in arys:
+ ary = asanyarray(ary)
+ if ary.ndim == 0:
+ result = ary.reshape(1, 1)
+ elif ary.ndim == 1:
+ result = ary[newaxis,:]
+ else:
+ result = ary
+ res.append(result)
+ if len(res) == 1:
+ return res[0]
+ else:
+ return res
+
+
+def _atleast_3d_dispatcher(*arys):
+ return arys
+
+
+@array_function_dispatch(_atleast_3d_dispatcher)
+def atleast_3d(*arys):
+ """
+ View inputs as arrays with at least three dimensions.
+
+ Parameters
+ ----------
+ arys1, arys2, ... : array_like
+ One or more array-like sequences. Non-array inputs are converted to
+ arrays. Arrays that already have three or more dimensions are
+ preserved.
+
+ Returns
+ -------
+ res1, res2, ... : ndarray
+ An array, or list of arrays, each with ``a.ndim >= 3``. Copies are
+ avoided where possible, and views with three or more dimensions are
+ returned. For example, a 1-D array of shape ``(N,)`` becomes a view
+ of shape ``(1, N, 1)``, and a 2-D array of shape ``(M, N)`` becomes a
+ view of shape ``(M, N, 1)``.
+
+ See Also
+ --------
+ atleast_1d, atleast_2d
+
+ Examples
+ --------
+ >>> np.atleast_3d(3.0)
+ array([[[ 3.]]])
+
+ >>> x = np.arange(3.0)
+ >>> np.atleast_3d(x).shape
+ (1, 3, 1)
+
+ >>> x = np.arange(12.0).reshape(4,3)
+ >>> np.atleast_3d(x).shape
+ (4, 3, 1)
+ >>> np.atleast_3d(x).base is x.base # x is a reshape, so not base itself
+ True
+
+ >>> for arr in np.atleast_3d([1, 2], [[1, 2]], [[[1, 2]]]):
+ ... print(arr, arr.shape)
+ ...
+ [[[1]
+ [2]]] (1, 2, 1)
+ [[[1]
+ [2]]] (1, 2, 1)
+ [[[1 2]]] (1, 1, 2)
+
+ """
+ res = []
+ for ary in arys:
+ ary = asanyarray(ary)
+ if ary.ndim == 0:
+ result = ary.reshape(1, 1, 1)
+ elif ary.ndim == 1:
+ result = ary[newaxis,:, newaxis]
+ elif ary.ndim == 2:
+ result = ary[:,:, newaxis]
+ else:
+ result = ary
+ res.append(result)
+ if len(res) == 1:
+ return res[0]
+ else:
+ return res
+
+
+def _arrays_for_stack_dispatcher(arrays, stacklevel=4):
+ if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'):
+ warnings.warn('arrays to stack must be passed as a "sequence" type '
+ 'such as list or tuple. Support for non-sequence '
+ 'iterables such as generators is deprecated as of '
+ 'NumPy 1.16 and will raise an error in the future.',
+ FutureWarning, stacklevel=stacklevel)
+ return ()
+ return arrays
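+
+# A doctest-style sketch of the behaviour above (illustrative, not from the
+# upstream source): a generator triggers the FutureWarning and is replaced
+# by an empty tuple, while a proper sequence passes through unchanged.
+#
+#     >>> import warnings
+#     >>> with warnings.catch_warnings(record=True):
+#     ...     warnings.simplefilter("always")
+#     ...     res = _arrays_for_stack_dispatcher(x for x in ())
+#     >>> res
+#     ()
+#     >>> _arrays_for_stack_dispatcher([1, 2, 3])  # no warning
+#     [1, 2, 3]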
+
+
+def _warn_for_nonsequence(arrays):
+ if not overrides.ENABLE_ARRAY_FUNCTION:
+ _arrays_for_stack_dispatcher(arrays, stacklevel=4)
+
+
+def _vhstack_dispatcher(tup):
+ return _arrays_for_stack_dispatcher(tup)
+
+
+@array_function_dispatch(_vhstack_dispatcher)
+def vstack(tup):
+ """
+ Stack arrays in sequence vertically (row wise).
+
+ This is equivalent to concatenation along the first axis after 1-D arrays
+ of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by
+ `vsplit`.
+
+ This function makes most sense for arrays with up to 3 dimensions. For
+ instance, for pixel-data with a height (first axis), width (second axis),
+ and r/g/b channels (third axis). The functions `concatenate`, `stack` and
+ `block` provide more general stacking and concatenation operations.
+
+ Parameters
+ ----------
+ tup : sequence of ndarrays
+ The arrays must have the same shape along all but the first axis.
+ 1-D arrays must have the same length.
+
+ Returns
+ -------
+ stacked : ndarray
+ The array formed by stacking the given arrays, will be at least 2-D.
+
+ See Also
+ --------
+ stack : Join a sequence of arrays along a new axis.
+ hstack : Stack arrays in sequence horizontally (column wise).
+ dstack : Stack arrays in sequence depth wise (along third dimension).
+ concatenate : Join a sequence of arrays along an existing axis.
+ vsplit : Split array into a list of multiple sub-arrays vertically.
+ block : Assemble arrays from blocks.
+
+ Examples
+ --------
+ >>> a = np.array([1, 2, 3])
+ >>> b = np.array([2, 3, 4])
+ >>> np.vstack((a,b))
+ array([[1, 2, 3],
+ [2, 3, 4]])
+
+ >>> a = np.array([[1], [2], [3]])
+ >>> b = np.array([[2], [3], [4]])
+ >>> np.vstack((a,b))
+ array([[1],
+ [2],
+ [3],
+ [2],
+ [3],
+ [4]])
+
+ """
+ _warn_for_nonsequence(tup)
+ return _nx.concatenate([atleast_2d(_m) for _m in tup], 0)
+
+
+@array_function_dispatch(_vhstack_dispatcher)
+def hstack(tup):
+ """
+ Stack arrays in sequence horizontally (column wise).
+
+ This is equivalent to concatenation along the second axis, except for 1-D
+ arrays where it concatenates along the first axis. Rebuilds arrays divided
+ by `hsplit`.
+
+ This function makes most sense for arrays with up to 3 dimensions. For
+ instance, for pixel-data with a height (first axis), width (second axis),
+ and r/g/b channels (third axis). The functions `concatenate`, `stack` and
+ `block` provide more general stacking and concatenation operations.
+
+ Parameters
+ ----------
+ tup : sequence of ndarrays
+ The arrays must have the same shape along all but the second axis,
+ except 1-D arrays which can be any length.
+
+ Returns
+ -------
+ stacked : ndarray
+ The array formed by stacking the given arrays.
+
+ See Also
+ --------
+ stack : Join a sequence of arrays along a new axis.
+ vstack : Stack arrays in sequence vertically (row wise).
+ dstack : Stack arrays in sequence depth wise (along third axis).
+ concatenate : Join a sequence of arrays along an existing axis.
+ hsplit : Split array along second axis.
+ block : Assemble arrays from blocks.
+
+ Examples
+ --------
+ >>> a = np.array((1,2,3))
+ >>> b = np.array((2,3,4))
+ >>> np.hstack((a,b))
+ array([1, 2, 3, 2, 3, 4])
+ >>> a = np.array([[1],[2],[3]])
+ >>> b = np.array([[2],[3],[4]])
+ >>> np.hstack((a,b))
+ array([[1, 2],
+ [2, 3],
+ [3, 4]])
+
+ """
+ _warn_for_nonsequence(tup)
+ arrs = [atleast_1d(_m) for _m in tup]
+ # As a special case, dimension 0 of 1-dimensional arrays is "horizontal"
+ if arrs and arrs[0].ndim == 1:
+ return _nx.concatenate(arrs, 0)
+ else:
+ return _nx.concatenate(arrs, 1)
+
+
+def _stack_dispatcher(arrays, axis=None, out=None):
+ arrays = _arrays_for_stack_dispatcher(arrays, stacklevel=6)
+ if out is not None:
+ # optimize for the typical case where only arrays is provided
+ arrays = list(arrays)
+ arrays.append(out)
+ return arrays
+
+
+@array_function_dispatch(_stack_dispatcher)
+def stack(arrays, axis=0, out=None):
+ """
+ Join a sequence of arrays along a new axis.
+
+ The `axis` parameter specifies the index of the new axis in the dimensions
+ of the result. For example, if ``axis=0`` it will be the first dimension
+ and if ``axis=-1`` it will be the last dimension.
+
+ .. versionadded:: 1.10.0
+
+ Parameters
+ ----------
+ arrays : sequence of array_like
+ Each array must have the same shape.
+ axis : int, optional
+ The axis in the result array along which the input arrays are stacked.
+ out : ndarray, optional
+ If provided, the destination to place the result. The shape must be
+ correct, matching that of what stack would have returned if no
+ out argument were specified.
+
+ Returns
+ -------
+ stacked : ndarray
+ The stacked array has one more dimension than the input arrays.
+
+ See Also
+ --------
+ concatenate : Join a sequence of arrays along an existing axis.
+ split : Split array into a list of multiple sub-arrays of equal size.
+ block : Assemble arrays from blocks.
+
+ Examples
+ --------
+ >>> arrays = [np.random.randn(3, 4) for _ in range(10)]
+ >>> np.stack(arrays, axis=0).shape
+ (10, 3, 4)
+
+ >>> np.stack(arrays, axis=1).shape
+ (3, 10, 4)
+
+ >>> np.stack(arrays, axis=2).shape
+ (3, 4, 10)
+
+ >>> a = np.array([1, 2, 3])
+ >>> b = np.array([2, 3, 4])
+ >>> np.stack((a, b))
+ array([[1, 2, 3],
+ [2, 3, 4]])
+
+ >>> np.stack((a, b), axis=-1)
+ array([[1, 2],
+ [2, 3],
+ [3, 4]])
+
+ """
+ _warn_for_nonsequence(arrays)
+ arrays = [asanyarray(arr) for arr in arrays]
+ if not arrays:
+ raise ValueError('need at least one array to stack')
+
+ shapes = {arr.shape for arr in arrays}
+ if len(shapes) != 1:
+ raise ValueError('all input arrays must have the same shape')
+
+ result_ndim = arrays[0].ndim + 1
+ axis = normalize_axis_index(axis, result_ndim)
+
+ sl = (slice(None),) * axis + (_nx.newaxis,)
+ expanded_arrays = [arr[sl] for arr in arrays]
+ return _nx.concatenate(expanded_arrays, axis=axis, out=out)
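+
+# A sketch of the axis-insertion step above (illustrative, not from the
+# upstream source): ``arr[sl]`` is equivalent to inserting a new axis at
+# position ``axis``, i.e. ``expand_dims(arr, axis)``.
+#
+#     >>> a = _nx.zeros((3, 4))
+#     >>> sl = (slice(None),) * 1 + (_nx.newaxis,)   # axis == 1
+#     >>> a[sl].shape
+#     (3, 1, 4)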
+
+
+def _block_format_index(index):
+ """
+ Convert a list of indices ``[0, 1, 2]`` into ``"arrays[0][1][2]"``.
+ """
+ idx_str = ''.join('[{}]'.format(i) for i in index if i is not None)
+ return 'arrays' + idx_str
+
+
+def _block_check_depths_match(arrays, parent_index=[]):
+ """
+ Recursive function checking that the depths of nested lists in `arrays`
+ all match. Mismatch raises a ValueError as described in the block
+ docstring below.
+
+ The entire index (rather than just the depth) needs to be calculated
+ for each innermost list, in case an error needs to be raised, so that
+ the index of the offending list can be printed as part of the error.
+
+ Parameters
+ ----------
+ arrays : nested list of arrays
+ The arrays to check
+ parent_index : list of int
+ The full index of `arrays` within the nested lists passed to
+ `_block_check_depths_match` at the top of the recursion.
+
+ Returns
+ -------
+ first_index : list of int
+ The full index of an element from the bottom of the nesting in
+ `arrays`. If any element at the bottom is an empty list, this will
+ refer to it, and the last index along the empty axis will be `None`.
+ max_arr_ndim : int
+ The maximum of the ndims of the arrays nested in `arrays`.
+ final_size : int
+ The number of elements in the final array. This is used to motivate
+ the choice of algorithm, based on benchmarking wisdom.
+
+ """
+ if type(arrays) is tuple:
+ # not strictly necessary, but saves us from:
+ # - more than one way to do things - no point treating tuples like
+ # lists
+ # - horribly confusing behaviour that results when tuples are
+ # treated like ndarray
+ raise TypeError(
+ '{} is a tuple. '
+ 'Only lists can be used to arrange blocks, and np.block does '
+ 'not allow implicit conversion from tuple to ndarray.'.format(
+ _block_format_index(parent_index)
+ )
+ )
+ elif type(arrays) is list and len(arrays) > 0:
+ idxs_ndims = (_block_check_depths_match(arr, parent_index + [i])
+ for i, arr in enumerate(arrays))
+
+ first_index, max_arr_ndim, final_size = next(idxs_ndims)
+ for index, ndim, size in idxs_ndims:
+ final_size += size
+ if ndim > max_arr_ndim:
+ max_arr_ndim = ndim
+ if len(index) != len(first_index):
+ raise ValueError(
+ "List depths are mismatched. First element was at depth "
+ "{}, but there is an element at depth {} ({})".format(
+ len(first_index),
+ len(index),
+ _block_format_index(index)
+ )
+ )
+ # propagate our flag that indicates an empty list at the bottom
+ if index[-1] is None:
+ first_index = index
+
+ return first_index, max_arr_ndim, final_size
+ elif type(arrays) is list and len(arrays) == 0:
+ # We've 'bottomed out' on an empty list
+ return parent_index + [None], 0, 0
+ else:
+ # We've 'bottomed out' - arrays is either a scalar or an array
+ size = _nx.size(arrays)
+ return parent_index, _nx.ndim(arrays), size
+
+
+def _atleast_nd(a, ndim):
+ # Ensures `a` has at least `ndim` dimensions by prepending
+ # ones to `a.shape` as necessary
+ return array(a, ndmin=ndim, copy=False, subok=True)
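+
+# For illustration (not from upstream): ``ndmin`` pads the shape on the
+# left, so a (2, 3) array promoted to 4 dimensions becomes (1, 1, 2, 3).
+#
+#     >>> _atleast_nd(_nx.ones((2, 3)), 4).shape
+#     (1, 1, 2, 3)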
+
+
+def _accumulate(values):
+ # Helper function because Python 2.7 doesn't have
+ # itertools.accumulate
+ value = 0
+ accumulated = []
+ for v in values:
+ value += v
+ accumulated.append(value)
+ return accumulated
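+
+# Behaves like Python 3's itertools.accumulate (illustrative sketch, not
+# from upstream):
+#
+#     >>> _accumulate([2, 3, 4])
+#     [2, 5, 9]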
+
+
+def _concatenate_shapes(shapes, axis):
+ """Given array shapes, return the resulting shape and slices prefixes.
+
+ These help in nested concatation.
+ Returns
+ -------
+ shape: tuple of int
+ This tuple satisfies:
+ ```
+ shape, _ = _concatenate_shapes([arr.shape for arr in arrs], axis)
+ shape == concatenate(arrs, axis).shape
+ ```
+
+ slice_prefixes: tuple of (slice(start, end), )
+ For a list of arrays being concatenated, this returns the slice
+ in the larger array at axis that needs to be sliced into.
+
+ For example, the following holds:
+ ```
+ ret = concatenate([a, b, c], axis)
+ _, (sl_a, sl_b, sl_c) = _concatenate_shapes([a.shape, b.shape, c.shape], axis)
+
+ ret[(slice(None),) * axis + sl_a] == a
+ ret[(slice(None),) * axis + sl_b] == b
+ ret[(slice(None),) * axis + sl_c] == c
+ ```
+
+ These are called slice prefixes since they are used in the recursive
+ blocking algorithm to compute the left-most slices during the
+ recursion. Therefore, they must be prepended to the rest of the slice
+ that was computed deeper in the recursion.
+
+ These are returned as tuples to ensure that they can quickly be added
+ to an existing slice tuple without creating a new tuple every time.
+
+ """
+ # Cache a result that will be reused.
+ shape_at_axis = [shape[axis] for shape in shapes]
+
+ # Take a shape, any shape
+ first_shape = shapes[0]
+ first_shape_pre = first_shape[:axis]
+ first_shape_post = first_shape[axis+1:]
+
+ if any(shape[:axis] != first_shape_pre or
+ shape[axis+1:] != first_shape_post for shape in shapes):
+ raise ValueError(
+ 'Mismatched array shapes in block along axis {}.'.format(axis))
+
+ shape = (first_shape_pre + (sum(shape_at_axis),) + first_shape[axis+1:])
+
+ offsets_at_axis = _accumulate(shape_at_axis)
+ slice_prefixes = [(slice(start, end),)
+ for start, end in zip([0] + offsets_at_axis,
+ offsets_at_axis)]
+ return shape, slice_prefixes
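+
+# A worked example (illustrative, not from upstream): two blocks of shapes
+# (2, 3) and (2, 4) joined along axis 1 give a (2, 7) result, and each
+# input owns one slice of that axis.
+#
+#     >>> _concatenate_shapes([(2, 3), (2, 4)], axis=1)
+#     ((2, 7), [(slice(0, 3),), (slice(3, 7),)])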
+
+
+def _block_info_recursion(arrays, max_depth, result_ndim, depth=0):
+ """
+ Returns the shape of the final array, along with a list
+ of slices and a list of arrays that can be used for assignment inside the
+ new array
+
+ Parameters
+ ----------
+ arrays : nested list of arrays
+ The arrays to check
+ max_depth : int
+ The depth of nesting of the lists
+ result_ndim : int
+ The number of dimensions in the final array.
+
+ Returns
+ -------
+ shape : tuple of int
+ The shape that the final array will take on.
+ slices: list of tuple of slices
+ The slices into the full array required for assignment. These are
+ required to be prepended with ``(Ellipsis, )`` to obtain the correct
+ final index.
+ arrays: list of ndarray
+ The data to assign to each slice of the full array
+
+ """
+ if depth < max_depth:
+ shapes, slices, arrays = zip(
+ *[_block_info_recursion(arr, max_depth, result_ndim, depth+1)
+ for arr in arrays])
+
+ axis = result_ndim - max_depth + depth
+ shape, slice_prefixes = _concatenate_shapes(shapes, axis)
+
+ # Prepend the slice prefix and flatten the slices
+ slices = [slice_prefix + the_slice
+ for slice_prefix, inner_slices in zip(slice_prefixes, slices)
+ for the_slice in inner_slices]
+
+ # Flatten the array list
+ arrays = functools.reduce(operator.add, arrays)
+
+ return shape, slices, arrays
+ else:
+ # We've 'bottomed out' - arrays is either a scalar or an array
+ # type(arrays) is not list
+ # Return the slice and the array inside a list to be consistent with
+ # the recursive case.
+ arr = _atleast_nd(arrays, result_ndim)
+ return arr.shape, [()], [arr]
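+
+# A sketch of the recursion's output for a flat, one-level block
+# (illustrative, not from upstream): the final shape plus one assignment
+# slice per input array.
+#
+#     >>> a, b = _nx.arange(2), _nx.arange(3)
+#     >>> shape, slices, arrs = _block_info_recursion([a, b], 1, 1)
+#     >>> shape, slices
+#     ((5,), [(slice(0, 2),), (slice(2, 5),)])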
+
+
+def _block(arrays, max_depth, result_ndim, depth=0):
+ """
+ Internal implementation of block based on repeated concatenation.
+ `arrays` is the argument passed to
+ block. `max_depth` is the depth of nested lists within `arrays` and
+ `result_ndim` is the greatest of the dimensions of the arrays in
+ `arrays` and the depth of the lists in `arrays` (see block docstring
+ for details).
+ """
+ if depth < max_depth:
+ arrs = [_block(arr, max_depth, result_ndim, depth+1)
+ for arr in arrays]
+ return _nx.concatenate(arrs, axis=-(max_depth-depth))
+ else:
+ # We've 'bottomed out' - arrays is either a scalar or an array
+ # type(arrays) is not list
+ return _atleast_nd(arrays, result_ndim)
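+
+# Equivalently (illustrative, not from upstream), a two-level block is a
+# concatenation along the last axis followed by one along the second-to-last:
+#
+#     >>> A, B = _nx.ones((2, 2)), _nx.zeros((2, 2))
+#     >>> rows = [_nx.concatenate(r, axis=-1) for r in ([A, B], [B, A])]
+#     >>> _nx.array_equal(_block([[A, B], [B, A]], 2, 2),
+#     ...                 _nx.concatenate(rows, axis=-2))
+#     True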
+
+
+def _block_dispatcher(arrays):
+ # Use type(...) is list to match the behavior of np.block(), which special
+ # cases list specifically rather than allowing for generic iterables or
+ # tuple. Also, we know that list.__array_function__ will never exist.
+ if type(arrays) is list:
+ for subarrays in arrays:
+ for subarray in _block_dispatcher(subarrays):
+ yield subarray
+ else:
+ yield arrays
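+
+# The dispatcher flattens the nested lists into a stream of leaf arrays
+# (illustrative, not from upstream):
+#
+#     >>> a, b, c = _nx.ones(1), _nx.ones(2), _nx.ones(3)
+#     >>> [arr.shape for arr in _block_dispatcher([[a, b], [c]])]
+#     [(1,), (2,), (3,)]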
+
+
+@array_function_dispatch(_block_dispatcher)
+def block(arrays):
+ """
+ Assemble an nd-array from nested lists of blocks.
+
+ Blocks in the innermost lists are concatenated (see `concatenate`) along
+ the last dimension (-1), then these are concatenated along the
+ second-last dimension (-2), and so on until the outermost list is reached.
+
+ Blocks can be of any dimension, but will not be broadcasted using the normal
+ rules. Instead, leading axes of size 1 are inserted, to make ``block.ndim``
+ the same for all blocks. This is primarily useful for working with scalars,
+ and means that code like ``np.block([v, 1])`` is valid, where
+ ``v.ndim == 1``.
+
+ When the nested list is two levels deep, this allows block matrices to be
+ constructed from their components.
+
+ .. versionadded:: 1.13.0
+
+ Parameters
+ ----------
+ arrays : nested list of array_like or scalars (but not tuples)
+ If passed a single ndarray or scalar (a nested list of depth 0), this
+ is returned unmodified (and not copied).
+
+ Element shapes must match along the appropriate axes (without
+ broadcasting), but leading 1s will be prepended to the shape as
+ necessary to make the dimensions match.
+
+ Returns
+ -------
+ block_array : ndarray
+ The array assembled from the given blocks.
+
+ The dimensionality of the output is equal to the greatest of:
+ * the dimensionality of all the inputs
+ * the depth to which the input list is nested
+
+ Raises
+ ------
+ ValueError
+ * If list depths are mismatched - for instance, ``[[a, b], c]`` is
+ illegal, and should be spelt ``[[a, b], [c]]``
+ * If lists are empty - for instance, ``[[a, b], []]``
+
+ See Also
+ --------
+ concatenate : Join a sequence of arrays together.
+ stack : Stack arrays in sequence along a new dimension.
+ hstack : Stack arrays in sequence horizontally (column wise).
+ vstack : Stack arrays in sequence vertically (row wise).
+ dstack : Stack arrays in sequence depth wise (along third dimension).
+ vsplit : Split array into a list of multiple sub-arrays vertically.
+
+ Notes
+ -----
+
+ When called with only scalars, ``np.block`` is equivalent to an ndarray
+ call. So ``np.block([[1, 2], [3, 4]])`` is equivalent to
+ ``np.array([[1, 2], [3, 4]])``.
+
+ This function does not enforce that the blocks lie on a fixed grid.
+ ``np.block([[a, b], [c, d]])`` is not restricted to arrays of the form::
+
+ AAAbb
+ AAAbb
+ cccDD
+
+ But is also allowed to produce, for some ``a, b, c, d``::
+
+ AAAbb
+ AAAbb
+ cDDDD
+
+ Since concatenation happens along the last axis first, `block` is _not_
+ capable of producing the following directly::
+
+ AAAbb
+ cccbb
+ cccDD
+
+ Matlab's "square bracket stacking", ``[A, B, ...; p, q, ...]``, is
+ equivalent to ``np.block([[A, B, ...], [p, q, ...]])``.
+
+ Examples
+ --------
+ The most common use of this function is to build a block matrix
+
+ >>> A = np.eye(2) * 2
+ >>> B = np.eye(3) * 3
+ >>> np.block([
+ ... [A, np.zeros((2, 3))],
+ ... [np.ones((3, 2)), B ]
+ ... ])
+ array([[ 2., 0., 0., 0., 0.],
+ [ 0., 2., 0., 0., 0.],
+ [ 1., 1., 3., 0., 0.],
+ [ 1., 1., 0., 3., 0.],
+ [ 1., 1., 0., 0., 3.]])
+
+ With a list of depth 1, `block` can be used as `hstack`
+
+ >>> np.block([1, 2, 3]) # hstack([1, 2, 3])
+ array([1, 2, 3])
+
+ >>> a = np.array([1, 2, 3])
+ >>> b = np.array([2, 3, 4])
+ >>> np.block([a, b, 10]) # hstack([a, b, 10])
+ array([1, 2, 3, 2, 3, 4, 10])
+
+ >>> A = np.ones((2, 2), int)
+ >>> B = 2 * A
+ >>> np.block([A, B]) # hstack([A, B])
+ array([[1, 1, 2, 2],
+ [1, 1, 2, 2]])
+
+ With a list of depth 2, `block` can be used in place of `vstack`:
+
+ >>> a = np.array([1, 2, 3])
+ >>> b = np.array([2, 3, 4])
+ >>> np.block([[a], [b]]) # vstack([a, b])
+ array([[1, 2, 3],
+ [2, 3, 4]])
+
+ >>> A = np.ones((2, 2), int)
+ >>> B = 2 * A
+ >>> np.block([[A], [B]]) # vstack([A, B])
+ array([[1, 1],
+ [1, 1],
+ [2, 2],
+ [2, 2]])
+
+ It can also be used in place of `atleast_1d` and `atleast_2d`
+
+ >>> a = np.array(0)
+ >>> b = np.array([1])
+ >>> np.block([a]) # atleast_1d(a)
+ array([0])
+ >>> np.block([b]) # atleast_1d(b)
+ array([1])
+
+ >>> np.block([[a]]) # atleast_2d(a)
+ array([[0]])
+ >>> np.block([[b]]) # atleast_2d(b)
+ array([[1]])
+
+
+ """
+ arrays, list_ndim, result_ndim, final_size = _block_setup(arrays)
+
+ # It was found through benchmarking that making an array of final size
+ # around 256x256 was faster by straight concatenation on an
+ # i7-7700HQ processor with dual-channel 2400 MHz RAM.
+ # The dtype used did not seem to matter much.
+ #
+ # A 2D array using repeated concatenation requires 2 copies of the array.
+ #
+ # The fastest algorithm will depend on the ratio of CPU power to memory
+ # speed.
+ # One can monitor the results of the benchmark
+ # https://pv.github.io/numpy-bench/#bench_shape_base.Block2D.time_block2d
+ # to tune this parameter until a C version of the `_block_info_recursion`
+ # algorithm is implemented which would likely be faster than the python
+ # version.
+ if list_ndim * final_size > (2 * 512 * 512):
+ return _block_slicing(arrays, list_ndim, result_ndim)
+ else:
+ return _block_concatenate(arrays, list_ndim, result_ndim)
+
+
+# These helper functions are mostly used for testing.
+# They allow us to write tests that directly call `_block_slicing`
+# or `_block_concatenate` without blocking large arrays to force the wisdom
+# to trigger the desired path.
+def _block_setup(arrays):
+ """
+ Returns
+ (`arrays`, list_ndim, result_ndim, final_size)
+ """
+ bottom_index, arr_ndim, final_size = _block_check_depths_match(arrays)
+ list_ndim = len(bottom_index)
+ if bottom_index and bottom_index[-1] is None:
+ raise ValueError(
+ 'List at {} cannot be empty'.format(
+ _block_format_index(bottom_index)
+ )
+ )
+ result_ndim = max(arr_ndim, list_ndim)
+ return arrays, list_ndim, result_ndim, final_size
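+
+# A sketch of the metadata returned (illustrative, not from upstream): a
+# 2x2 grid of (2, 2) blocks nests two lists deep, builds a 2-D result, and
+# holds 16 elements in total.
+#
+#     >>> A = _nx.ones((2, 2))
+#     >>> _block_setup([[A, A], [A, A]])[1:]
+#     (2, 2, 16)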
+
+
+def _block_slicing(arrays, list_ndim, result_ndim):
+ shape, slices, arrays = _block_info_recursion(
+ arrays, list_ndim, result_ndim)
+ dtype = _nx.result_type(*[arr.dtype for arr in arrays])
+
+ # Test preferring F only in the case that all input arrays are F
+ F_order = all(arr.flags['F_CONTIGUOUS'] for arr in arrays)
+ C_order = all(arr.flags['C_CONTIGUOUS'] for arr in arrays)
+ order = 'F' if F_order and not C_order else 'C'
+ result = _nx.empty(shape=shape, dtype=dtype, order=order)
+ # Note: In a C implementation, the function
+ # PyArray_CreateMultiSortedStridePerm could be used for more advanced
+ # guessing of the desired order.
+
+ for the_slice, arr in zip(slices, arrays):
+ result[(Ellipsis,) + the_slice] = arr
+ return result
+
+
+def _block_concatenate(arrays, list_ndim, result_ndim):
+ result = _block(arrays, list_ndim, result_ndim)
+ if list_ndim == 0:
+ # Catch an edge case where _block returns a view because
+ # `arrays` is a single numpy array and not a list of numpy arrays.
+ # This might copy scalars or lists twice, but this isn't a likely
+ # use case for those interested in performance
+ result = result.copy()
+ return result
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/shape_base.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/shape_base.pyc
new file mode 100644
index 0000000..dd1aa41
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/shape_base.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/__init__.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/__init__.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/__init__.pyc
new file mode 100644
index 0000000..e809cf8
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/__init__.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/_locales.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/_locales.py
new file mode 100644
index 0000000..52e4ff3
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/_locales.py
@@ -0,0 +1,76 @@
+"""Provide class for testing in French locale
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import sys
+import locale
+
+import pytest
+
+__all__ = ['CommaDecimalPointLocale']
+
+
+def find_comma_decimal_point_locale():
+ """See if platform has a decimal point as comma locale.
+
+ Find a locale that uses a comma instead of a period as the
+ decimal point.
+
+ Returns
+ -------
+ old_locale: str
+ Locale when the function was called.
+ new_locale : {str, None}
+ First French locale found, None if none found.
+
+ """
+ if sys.platform == 'win32':
+ locales = ['FRENCH']
+ else:
+ locales = ['fr_FR', 'fr_FR.UTF-8', 'fi_FI', 'fi_FI.UTF-8']
+
+ old_locale = locale.getlocale(locale.LC_NUMERIC)
+ new_locale = None
+ try:
+ for loc in locales:
+ try:
+ locale.setlocale(locale.LC_NUMERIC, loc)
+ new_locale = loc
+ break
+ except locale.Error:
+ pass
+ finally:
+ locale.setlocale(locale.LC_NUMERIC, locale=old_locale)
+ return old_locale, new_locale
+
+
+class CommaDecimalPointLocale(object):
+ """Sets LC_NUMERIC to a locale with comma as decimal point.
+
+ Classes derived from this class have setup and teardown methods that run
+ tests with locale.LC_NUMERIC set to a locale where commas (',') are used as
+ the decimal point instead of periods ('.'). On exit the locale is restored
+ to the initial locale. It also serves as a context manager with the same
+ effect. If no such locale is available, the test is skipped.
+
+ .. versionadded:: 1.15.0
+
+ """
+ (cur_locale, tst_locale) = find_comma_decimal_point_locale()
+
+ def setup(self):
+ if self.tst_locale is None:
+ pytest.skip("No French locale available")
+ locale.setlocale(locale.LC_NUMERIC, locale=self.tst_locale)
+
+ def teardown(self):
+ locale.setlocale(locale.LC_NUMERIC, locale=self.cur_locale)
+
+ def __enter__(self):
+ if self.tst_locale is None:
+ pytest.skip("No French locale available")
+ locale.setlocale(locale.LC_NUMERIC, locale=self.tst_locale)
+
+ def __exit__(self, type, value, traceback):
+ locale.setlocale(locale.LC_NUMERIC, locale=self.cur_locale)
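+
+# Example usage (an illustrative sketch, not part of the upstream file;
+# assumes a comma-decimal locale such as fr_FR is actually installed):
+#
+#     class TestFrenchLocale(CommaDecimalPointLocale):
+#         def test_str_of_float(self):
+#             assert locale.str(1.5) == '1,5'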
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/_locales.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/_locales.pyc
new file mode 100644
index 0000000..ba68e27
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/_locales.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/data/astype_copy.pkl b/project/venv/lib/python2.7/site-packages/numpy/core/tests/data/astype_copy.pkl
new file mode 100644
index 0000000..7397c97
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/data/astype_copy.pkl differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/data/recarray_from_file.fits b/project/venv/lib/python2.7/site-packages/numpy/core/tests/data/recarray_from_file.fits
new file mode 100644
index 0000000..ca48ee8
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/data/recarray_from_file.fits differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_abc.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_abc.py
new file mode 100644
index 0000000..d9c61b0
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_abc.py
@@ -0,0 +1,56 @@
+from __future__ import division, absolute_import, print_function
+
+from numpy.testing import assert_
+
+import numbers
+
+import numpy as np
+from numpy.core.numerictypes import sctypes
+
+class TestABC(object):
+ def test_abstract(self):
+ assert_(issubclass(np.number, numbers.Number))
+
+ assert_(issubclass(np.inexact, numbers.Complex))
+ assert_(issubclass(np.complexfloating, numbers.Complex))
+ assert_(issubclass(np.floating, numbers.Real))
+
+ assert_(issubclass(np.integer, numbers.Integral))
+ assert_(issubclass(np.signedinteger, numbers.Integral))
+ assert_(issubclass(np.unsignedinteger, numbers.Integral))
+
+ def test_floats(self):
+ for t in sctypes['float']:
+ assert_(isinstance(t(), numbers.Real),
+ "{0} is not instance of Real".format(t.__name__))
+ assert_(issubclass(t, numbers.Real),
+ "{0} is not subclass of Real".format(t.__name__))
+ assert_(not isinstance(t(), numbers.Rational),
+ "{0} is instance of Rational".format(t.__name__))
+ assert_(not issubclass(t, numbers.Rational),
+ "{0} is subclass of Rational".format(t.__name__))
+
+ def test_complex(self):
+ for t in sctypes['complex']:
+ assert_(isinstance(t(), numbers.Complex),
+ "{0} is not instance of Complex".format(t.__name__))
+ assert_(issubclass(t, numbers.Complex),
+ "{0} is not subclass of Complex".format(t.__name__))
+ assert_(not isinstance(t(), numbers.Real),
+ "{0} is instance of Real".format(t.__name__))
+ assert_(not issubclass(t, numbers.Real),
+ "{0} is subclass of Real".format(t.__name__))
+
+ def test_int(self):
+ for t in sctypes['int']:
+ assert_(isinstance(t(), numbers.Integral),
+ "{0} is not instance of Integral".format(t.__name__))
+ assert_(issubclass(t, numbers.Integral),
+ "{0} is not subclass of Integral".format(t.__name__))
+
+ def test_uint(self):
+ for t in sctypes['uint']:
+ assert_(isinstance(t(), numbers.Integral),
+ "{0} is not instance of Integral".format(t.__name__))
+ assert_(issubclass(t, numbers.Integral),
+ "{0} is not subclass of Integral".format(t.__name__))
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_abc.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_abc.pyc
new file mode 100644
index 0000000..f904b25
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_abc.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_api.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_api.py
new file mode 100644
index 0000000..9755e7b
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_api.py
@@ -0,0 +1,516 @@
+from __future__ import division, absolute_import, print_function
+
+import sys
+
+import numpy as np
+from numpy.testing import (
+ assert_, assert_equal, assert_array_equal, assert_raises, HAS_REFCOUNT
+ )
+
+# True when NumPy was built with NPY_RELAXED_STRIDES_CHECKING, which
+# relaxes how the contiguity flags are set.
+NPY_RELAXED_STRIDES_CHECKING = np.ones((10, 1), order='C').flags.f_contiguous
+
+
+def test_array_array():
+ tobj = type(object)
+ ones11 = np.ones((1, 1), np.float64)
+ tndarray = type(ones11)
+ # Test is_ndarray
+ assert_equal(np.array(ones11, dtype=np.float64), ones11)
+ if HAS_REFCOUNT:
+ old_refcount = sys.getrefcount(tndarray)
+ np.array(ones11)
+ assert_equal(old_refcount, sys.getrefcount(tndarray))
+
+ # test None
+ assert_equal(np.array(None, dtype=np.float64),
+ np.array(np.nan, dtype=np.float64))
+ if HAS_REFCOUNT:
+ old_refcount = sys.getrefcount(tobj)
+ np.array(None, dtype=np.float64)
+ assert_equal(old_refcount, sys.getrefcount(tobj))
+
+ # test scalar
+ assert_equal(np.array(1.0, dtype=np.float64),
+ np.ones((), dtype=np.float64))
+ if HAS_REFCOUNT:
+ old_refcount = sys.getrefcount(np.float64)
+ np.array(np.array(1.0, dtype=np.float64), dtype=np.float64)
+ assert_equal(old_refcount, sys.getrefcount(np.float64))
+
+ # test string
+ S2 = np.dtype((str, 2))
+ S3 = np.dtype((str, 3))
+ S5 = np.dtype((str, 5))
+ assert_equal(np.array("1.0", dtype=np.float64),
+ np.ones((), dtype=np.float64))
+ assert_equal(np.array("1.0").dtype, S3)
+ assert_equal(np.array("1.0", dtype=str).dtype, S3)
+ assert_equal(np.array("1.0", dtype=S2), np.array("1."))
+ assert_equal(np.array("1", dtype=S5), np.ones((), dtype=S5))
+
+ # test unicode
+ _unicode = globals().get("unicode")
+ if _unicode:
+ U2 = np.dtype((_unicode, 2))
+ U3 = np.dtype((_unicode, 3))
+ U5 = np.dtype((_unicode, 5))
+ assert_equal(np.array(_unicode("1.0"), dtype=np.float64),
+ np.ones((), dtype=np.float64))
+ assert_equal(np.array(_unicode("1.0")).dtype, U3)
+ assert_equal(np.array(_unicode("1.0"), dtype=_unicode).dtype, U3)
+ assert_equal(np.array(_unicode("1.0"), dtype=U2),
+ np.array(_unicode("1.")))
+ assert_equal(np.array(_unicode("1"), dtype=U5),
+ np.ones((), dtype=U5))
+
+ builtins = getattr(__builtins__, '__dict__', __builtins__)
+ assert_(hasattr(builtins, 'get'))
+
+ # test buffer
+ _buffer = builtins.get("buffer")
+ if _buffer and sys.version_info[:3] >= (2, 7, 5):
+ # This test fails for earlier versions of Python.
+ # Evidently a bug got fixed in 2.7.5.
+ dat = np.array(_buffer('1.0'), dtype=np.float64)
+ assert_equal(dat, [49.0, 46.0, 48.0])
+ assert_(dat.dtype.type is np.float64)
+
+ dat = np.array(_buffer(b'1.0'))
+ assert_equal(dat, [49, 46, 48])
+ assert_(dat.dtype.type is np.uint8)
+
+ # test memoryview, new version of buffer
+ _memoryview = builtins.get("memoryview")
+ if _memoryview:
+ dat = np.array(_memoryview(b'1.0'), dtype=np.float64)
+ assert_equal(dat, [49.0, 46.0, 48.0])
+ assert_(dat.dtype.type is np.float64)
+
+ dat = np.array(_memoryview(b'1.0'))
+ assert_equal(dat, [49, 46, 48])
+ assert_(dat.dtype.type is np.uint8)
+
+ # test array interface
+ a = np.array(100.0, dtype=np.float64)
+ o = type("o", (object,),
+ dict(__array_interface__=a.__array_interface__))
+ assert_equal(np.array(o, dtype=np.float64), a)
+
+ # test array_struct interface
+ a = np.array([(1, 4.0, 'Hello'), (2, 6.0, 'World')],
+ dtype=[('f0', int), ('f1', float), ('f2', str)])
+ o = type("o", (object,),
+ dict(__array_struct__=a.__array_struct__))
+ ## wasn't what I expected... is np.array(o) supposed to equal a ?
+ ## instead we get an array([...], dtype=">V18")
+ assert_equal(bytes(np.array(o).data), bytes(a.data))
+
+ # test array
+ o = type("o", (object,),
+ dict(__array__=lambda *x: np.array(100.0, dtype=np.float64)))()
+ assert_equal(np.array(o, dtype=np.float64), np.array(100.0, np.float64))
+
+ # test recursion
+ nested = 1.5
+ for i in range(np.MAXDIMS):
+ nested = [nested]
+
+ # no error
+ np.array(nested)
+
+ # Exceeds recursion limit
+ assert_raises(ValueError, np.array, [nested], dtype=np.float64)
+
+ # Try with lists...
+ assert_equal(np.array([None] * 10, dtype=np.float64),
+ np.full((10,), np.nan, dtype=np.float64))
+ assert_equal(np.array([[None]] * 10, dtype=np.float64),
+ np.full((10, 1), np.nan, dtype=np.float64))
+ assert_equal(np.array([[None] * 10], dtype=np.float64),
+ np.full((1, 10), np.nan, dtype=np.float64))
+ assert_equal(np.array([[None] * 10] * 10, dtype=np.float64),
+ np.full((10, 10), np.nan, dtype=np.float64))
+
+ assert_equal(np.array([1.0] * 10, dtype=np.float64),
+ np.ones((10,), dtype=np.float64))
+ assert_equal(np.array([[1.0]] * 10, dtype=np.float64),
+ np.ones((10, 1), dtype=np.float64))
+ assert_equal(np.array([[1.0] * 10], dtype=np.float64),
+ np.ones((1, 10), dtype=np.float64))
+ assert_equal(np.array([[1.0] * 10] * 10, dtype=np.float64),
+ np.ones((10, 10), dtype=np.float64))
+
+ # Try with tuples
+ assert_equal(np.array((None,) * 10, dtype=np.float64),
+ np.full((10,), np.nan, dtype=np.float64))
+ assert_equal(np.array([(None,)] * 10, dtype=np.float64),
+ np.full((10, 1), np.nan, dtype=np.float64))
+ assert_equal(np.array([(None,) * 10], dtype=np.float64),
+ np.full((1, 10), np.nan, dtype=np.float64))
+ assert_equal(np.array([(None,) * 10] * 10, dtype=np.float64),
+ np.full((10, 10), np.nan, dtype=np.float64))
+
+ assert_equal(np.array((1.0,) * 10, dtype=np.float64),
+ np.ones((10,), dtype=np.float64))
+ assert_equal(np.array([(1.0,)] * 10, dtype=np.float64),
+ np.ones((10, 1), dtype=np.float64))
+ assert_equal(np.array([(1.0,) * 10], dtype=np.float64),
+ np.ones((1, 10), dtype=np.float64))
+ assert_equal(np.array([(1.0,) * 10] * 10, dtype=np.float64),
+ np.ones((10, 10), dtype=np.float64))
+
+
+def test_fastCopyAndTranspose():
+ # 0D array
+ a = np.array(2)
+ b = np.fastCopyAndTranspose(a)
+ assert_equal(b, a.T)
+ assert_(b.flags.owndata)
+
+ # 1D array
+ a = np.array([3, 2, 7, 0])
+ b = np.fastCopyAndTranspose(a)
+ assert_equal(b, a.T)
+ assert_(b.flags.owndata)
+
+ # 2D array
+ a = np.arange(6).reshape(2, 3)
+ b = np.fastCopyAndTranspose(a)
+ assert_equal(b, a.T)
+ assert_(b.flags.owndata)
+
+def test_array_astype():
+ a = np.arange(6, dtype='f4').reshape(2, 3)
+ # Default behavior: allows unsafe casts, keeps memory layout,
+ # always copies.
+ b = a.astype('i4')
+ assert_equal(a, b)
+ assert_equal(b.dtype, np.dtype('i4'))
+ assert_equal(a.strides, b.strides)
+ b = a.T.astype('i4')
+ assert_equal(a.T, b)
+ assert_equal(b.dtype, np.dtype('i4'))
+ assert_equal(a.T.strides, b.strides)
+ b = a.astype('f4')
+ assert_equal(a, b)
+ assert_(not (a is b))
+
+ # copy=False parameter can sometimes skip a copy
+ b = a.astype('f4', copy=False)
+ assert_(a is b)
+
+ # order parameter allows overriding of the memory layout,
+ # forcing a copy if the layout is wrong
+ b = a.astype('f4', order='F', copy=False)
+ assert_equal(a, b)
+ assert_(not (a is b))
+ assert_(b.flags.f_contiguous)
+
+ b = a.astype('f4', order='C', copy=False)
+ assert_equal(a, b)
+ assert_(a is b)
+ assert_(b.flags.c_contiguous)
+
+ # casting parameter allows catching bad casts
+ b = a.astype('c8', casting='safe')
+ assert_equal(a, b)
+ assert_equal(b.dtype, np.dtype('c8'))
+
+ assert_raises(TypeError, a.astype, 'i4', casting='safe')
+
+ # subok=False passes through a non-subclassed array
+ b = a.astype('f4', subok=0, copy=False)
+ assert_(a is b)
+
+ class MyNDArray(np.ndarray):
+ pass
+
+ a = np.array([[0, 1, 2], [3, 4, 5]], dtype='f4').view(MyNDArray)
+
+ # subok=True passes through a subclass
+ b = a.astype('f4', subok=True, copy=False)
+ assert_(a is b)
+
+ # subok=True is default, and creates a subtype on a cast
+ b = a.astype('i4', copy=False)
+ assert_equal(a, b)
+ assert_equal(type(b), MyNDArray)
+
+ # subok=False never returns a subclass
+ b = a.astype('f4', subok=False, copy=False)
+ assert_equal(a, b)
+ assert_(not (a is b))
+ assert_(type(b) is not MyNDArray)
+
+ # Make sure converting from string object to fixed length string
+ # does not truncate.
+ a = np.array([b'a'*100], dtype='O')
+ b = a.astype('S')
+ assert_equal(a, b)
+ assert_equal(b.dtype, np.dtype('S100'))
+ a = np.array([u'a'*100], dtype='O')
+ b = a.astype('U')
+ assert_equal(a, b)
+ assert_equal(b.dtype, np.dtype('U100'))
+
+ # Same test as above but for strings shorter than 64 characters
+ a = np.array([b'a'*10], dtype='O')
+ b = a.astype('S')
+ assert_equal(a, b)
+ assert_equal(b.dtype, np.dtype('S10'))
+ a = np.array([u'a'*10], dtype='O')
+ b = a.astype('U')
+ assert_equal(a, b)
+ assert_equal(b.dtype, np.dtype('U10'))
+
+ a = np.array(123456789012345678901234567890, dtype='O').astype('S')
+ assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30'))
+ a = np.array(123456789012345678901234567890, dtype='O').astype('U')
+ assert_array_equal(a, np.array(u'1234567890' * 3, dtype='U30'))
+
+ a = np.array([123456789012345678901234567890], dtype='O').astype('S')
+ assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30'))
+ a = np.array([123456789012345678901234567890], dtype='O').astype('U')
+ assert_array_equal(a, np.array(u'1234567890' * 3, dtype='U30'))
+
+ a = np.array(123456789012345678901234567890, dtype='S')
+ assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30'))
+ a = np.array(123456789012345678901234567890, dtype='U')
+ assert_array_equal(a, np.array(u'1234567890' * 3, dtype='U30'))
+
+ a = np.array(u'a\u0140', dtype='U')
+ b = np.ndarray(buffer=a, dtype='uint32', shape=2)
+ assert_(b.size == 2)
+
+ a = np.array([1000], dtype='i4')
+ assert_raises(TypeError, a.astype, 'S1', casting='safe')
+
+ a = np.array(1000, dtype='i4')
+ assert_raises(TypeError, a.astype, 'U1', casting='safe')
+
+def test_copyto_fromscalar():
+ a = np.arange(6, dtype='f4').reshape(2, 3)
+
+ # Simple copy
+ np.copyto(a, 1.5)
+ assert_equal(a, 1.5)
+ np.copyto(a.T, 2.5)
+ assert_equal(a, 2.5)
+
+ # Where-masked copy
+ mask = np.array([[0, 1, 0], [0, 0, 1]], dtype='?')
+ np.copyto(a, 3.5, where=mask)
+ assert_equal(a, [[2.5, 3.5, 2.5], [2.5, 2.5, 3.5]])
+ mask = np.array([[0, 1], [1, 1], [1, 0]], dtype='?')
+ np.copyto(a.T, 4.5, where=mask)
+ assert_equal(a, [[2.5, 4.5, 4.5], [4.5, 4.5, 3.5]])
+
+def test_copyto():
+ a = np.arange(6, dtype='i4').reshape(2, 3)
+
+ # Simple copy
+ np.copyto(a, [[3, 1, 5], [6, 2, 1]])
+ assert_equal(a, [[3, 1, 5], [6, 2, 1]])
+
+ # Overlapping copy should work
+ np.copyto(a[:, :2], a[::-1, 1::-1])
+ assert_equal(a, [[2, 6, 5], [1, 3, 1]])
+
+ # Defaults to 'same_kind' casting
+ assert_raises(TypeError, np.copyto, a, 1.5)
+
+ # Force a copy with 'unsafe' casting, truncating 1.5 to 1
+ np.copyto(a, 1.5, casting='unsafe')
+ assert_equal(a, 1)
+
+ # Copying with a mask
+ np.copyto(a, 3, where=[True, False, True])
+ assert_equal(a, [[3, 1, 3], [3, 1, 3]])
+
+ # Casting rule still applies with a mask
+ assert_raises(TypeError, np.copyto, a, 3.5, where=[True, False, True])
+
+ # Lists of integer 0's and 1's is ok too
+ np.copyto(a, 4.0, casting='unsafe', where=[[0, 1, 1], [1, 0, 0]])
+ assert_equal(a, [[3, 4, 4], [4, 1, 3]])
+
+ # Overlapping copy with mask should work
+ np.copyto(a[:, :2], a[::-1, 1::-1], where=[[0, 1], [1, 1]])
+ assert_equal(a, [[3, 4, 4], [4, 3, 3]])
+
+ # 'dst' must be an array
+ assert_raises(TypeError, np.copyto, [1, 2, 3], [2, 3, 4])
+
+def test_copyto_permut():
+ # test explicit overflow case
+ pad = 500
+ l = [True] * pad + [True, True, True, True]
+ r = np.zeros(len(l)-pad)
+ d = np.ones(len(l)-pad)
+ mask = np.array(l)[pad:]
+ np.copyto(r, d, where=mask[::-1])
+
+ # test all permutations of possible masks, 9 should be sufficient for
+ # current 4 byte unrolled code
+ power = 9
+ d = np.ones(power)
+ for i in range(2**power):
+ r = np.zeros(power)
+ l = [(i & x) != 0 for x in range(power)]
+ mask = np.array(l)
+ np.copyto(r, d, where=mask)
+ assert_array_equal(r == 1, l)
+ assert_equal(r.sum(), sum(l))
+
+ r = np.zeros(power)
+ np.copyto(r, d, where=mask[::-1])
+ assert_array_equal(r == 1, l[::-1])
+ assert_equal(r.sum(), sum(l))
+
+ r = np.zeros(power)
+ np.copyto(r[::2], d[::2], where=mask[::2])
+ assert_array_equal(r[::2] == 1, l[::2])
+ assert_equal(r[::2].sum(), sum(l[::2]))
+
+ r = np.zeros(power)
+ np.copyto(r[::2], d[::2], where=mask[::-2])
+ assert_array_equal(r[::2] == 1, l[::-2])
+ assert_equal(r[::2].sum(), sum(l[::-2]))
+
+ for c in [0xFF, 0x7F, 0x02, 0x10]:
+ r = np.zeros(power)
+ mask = np.array(l)
+ imask = np.array(l).view(np.uint8)
+ imask[mask != 0] = c
+ np.copyto(r, d, where=mask)
+ assert_array_equal(r == 1, l)
+ assert_equal(r.sum(), sum(l))
+
+ r = np.zeros(power)
+ np.copyto(r, d, where=True)
+ assert_equal(r.sum(), r.size)
+ r = np.ones(power)
+ d = np.zeros(power)
+ np.copyto(r, d, where=False)
+ assert_equal(r.sum(), r.size)
+
+def test_copy_order():
+ a = np.arange(24).reshape(2, 1, 3, 4)
+ b = a.copy(order='F')
+ c = np.arange(24).reshape(2, 1, 4, 3).swapaxes(2, 3)
+
+ def check_copy_result(x, y, ccontig, fcontig, strides=False):
+ assert_(not (x is y))
+ assert_equal(x, y)
+ assert_equal(res.flags.c_contiguous, ccontig)
+ assert_equal(res.flags.f_contiguous, fcontig)
+ # This check cannot be done when NPY_RELAXED_STRIDES_CHECKING
+ # is enabled, because it actively changes the strides
+ if not NPY_RELAXED_STRIDES_CHECKING:
+ if strides:
+ assert_equal(x.strides, y.strides)
+ else:
+ assert_(x.strides != y.strides)
+
+ # Validate the initial state of a, b, and c
+ assert_(a.flags.c_contiguous)
+ assert_(not a.flags.f_contiguous)
+ assert_(not b.flags.c_contiguous)
+ assert_(b.flags.f_contiguous)
+ assert_(not c.flags.c_contiguous)
+ assert_(not c.flags.f_contiguous)
+
+ # Copy with order='C'
+ res = a.copy(order='C')
+ check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
+ res = b.copy(order='C')
+ check_copy_result(res, b, ccontig=True, fcontig=False, strides=False)
+ res = c.copy(order='C')
+ check_copy_result(res, c, ccontig=True, fcontig=False, strides=False)
+ res = np.copy(a, order='C')
+ check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
+ res = np.copy(b, order='C')
+ check_copy_result(res, b, ccontig=True, fcontig=False, strides=False)
+ res = np.copy(c, order='C')
+ check_copy_result(res, c, ccontig=True, fcontig=False, strides=False)
+
+ # Copy with order='F'
+ res = a.copy(order='F')
+ check_copy_result(res, a, ccontig=False, fcontig=True, strides=False)
+ res = b.copy(order='F')
+ check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
+ res = c.copy(order='F')
+ check_copy_result(res, c, ccontig=False, fcontig=True, strides=False)
+ res = np.copy(a, order='F')
+ check_copy_result(res, a, ccontig=False, fcontig=True, strides=False)
+ res = np.copy(b, order='F')
+ check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
+ res = np.copy(c, order='F')
+ check_copy_result(res, c, ccontig=False, fcontig=True, strides=False)
+
+ # Copy with order='K'
+ res = a.copy(order='K')
+ check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
+ res = b.copy(order='K')
+ check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
+ res = c.copy(order='K')
+ check_copy_result(res, c, ccontig=False, fcontig=False, strides=True)
+ res = np.copy(a, order='K')
+ check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
+ res = np.copy(b, order='K')
+ check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
+ res = np.copy(c, order='K')
+ check_copy_result(res, c, ccontig=False, fcontig=False, strides=True)
+
+def test_contiguous_flags():
+ a = np.ones((4, 4, 1))[::2,:,:]
+ if NPY_RELAXED_STRIDES_CHECKING:
+ a.strides = a.strides[:2] + (-123,)
+ b = np.ones((2, 2, 1, 2, 2)).swapaxes(3, 4)
+
+ def check_contig(a, ccontig, fcontig):
+ assert_(a.flags.c_contiguous == ccontig)
+ assert_(a.flags.f_contiguous == fcontig)
+
+ # Check if new arrays are correct:
+ check_contig(a, False, False)
+ check_contig(b, False, False)
+ if NPY_RELAXED_STRIDES_CHECKING:
+ check_contig(np.empty((2, 2, 0, 2, 2)), True, True)
+ check_contig(np.array([[[1], [2]]], order='F'), True, True)
+ else:
+ check_contig(np.empty((2, 2, 0, 2, 2)), True, False)
+ check_contig(np.array([[[1], [2]]], order='F'), False, True)
+ check_contig(np.empty((2, 2)), True, False)
+ check_contig(np.empty((2, 2), order='F'), False, True)
+
+ # Check that np.array creates correct contiguous flags:
+ check_contig(np.array(a, copy=False), False, False)
+ check_contig(np.array(a, copy=False, order='C'), True, False)
+ check_contig(np.array(a, ndmin=4, copy=False, order='F'), False, True)
+
+ if NPY_RELAXED_STRIDES_CHECKING:
+ # Check slicing update of flags:
+ check_contig(a[0], True, True)
+ check_contig(a[None, ::4, ..., None], True, True)
+ check_contig(b[0, 0, ...], False, True)
+ check_contig(b[:,:, 0:0,:,:], True, True)
+ else:
+ # Check slicing update of flags:
+ check_contig(a[0], True, False)
+ # Would be nice if this was C-Contiguous:
+ check_contig(a[None, 0, ..., None], False, False)
+ check_contig(b[0, 0, 0, ...], False, True)
+
+ # Test ravel and squeeze.
+ check_contig(a.ravel(), True, True)
+ check_contig(np.ones((1, 3, 1)).squeeze(), True, True)
+
+def test_broadcast_arrays():
+ # Test user defined dtypes
+ a = np.array([(1, 2, 3)], dtype='u4,u4,u4')
+ b = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4')
+ result = np.broadcast_arrays(a, b)
+ assert_equal(result[0], np.array([(1, 2, 3), (1, 2, 3), (1, 2, 3)], dtype='u4,u4,u4'))
+ assert_equal(result[1], np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4'))
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_api.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_api.pyc
new file mode 100644
index 0000000..28be9b0
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_api.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_arrayprint.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_arrayprint.py
new file mode 100644
index 0000000..f2b8fdc
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_arrayprint.py
@@ -0,0 +1,893 @@
+# -*- coding: utf-8 -*-
+from __future__ import division, absolute_import, print_function
+
+import sys
+import gc
+import pytest
+
+import numpy as np
+from numpy.testing import (
+ assert_, assert_equal, assert_raises, assert_warns, HAS_REFCOUNT,
+ assert_raises_regex,
+ )
+import textwrap
+
+class TestArrayRepr(object):
+ def test_nan_inf(self):
+ x = np.array([np.nan, np.inf])
+ assert_equal(repr(x), 'array([nan, inf])')
+
+ def test_subclass(self):
+ class sub(np.ndarray): pass
+
+ # one dimensional
+ x1d = np.array([1, 2]).view(sub)
+ assert_equal(repr(x1d), 'sub([1, 2])')
+
+ # two dimensional
+ x2d = np.array([[1, 2], [3, 4]]).view(sub)
+ assert_equal(repr(x2d),
+ 'sub([[1, 2],\n'
+ ' [3, 4]])')
+
+ # two dimensional with flexible dtype
+        xstruct = np.ones((2, 2), dtype=[('a', '<i4')]).view(sub)
+        assert_equal(repr(xstruct),
+            "sub([[(1,), (1,)],\n"
+            "     [(1,), (1,)]], dtype=[('a', '<i4')])"
+        )
+
+    def test_0d_object_subclass(self):
+        # make sure that subclasses of ndarray that return 0d arrays from
+        # __getitem__ are printed properly
+
+        class sub(np.ndarray):
+            def __new__(cls, inp):
+                obj = np.asarray(inp).view(cls)
+                return obj
+
+            def __getitem__(self, ind):
+                ret = super(sub, self).__getitem__(ind)
+                return sub(ret)
+
+        # mutually recursive 0d-subclass-objects
+        x = sub(None)
+ y = sub(None)
+ x[()] = y
+ y[()] = x
+ assert_equal(repr(x),
+ 'sub(sub(sub(..., dtype=object), dtype=object), dtype=object)')
+ assert_equal(str(x), '...')
+ x[()] = 0 # resolve circular references for garbage collector
+
+ # nested 0d-subclass-object
+ x = sub(None)
+ x[()] = sub(None)
+ assert_equal(repr(x), 'sub(sub(None, dtype=object), dtype=object)')
+ assert_equal(str(x), 'None')
+
+ # gh-10663
+ class DuckCounter(np.ndarray):
+ def __getitem__(self, item):
+ result = super(DuckCounter, self).__getitem__(item)
+ if not isinstance(result, DuckCounter):
+ result = result[...].view(DuckCounter)
+ return result
+
+ def to_string(self):
+ return {0: 'zero', 1: 'one', 2: 'two'}.get(self.item(), 'many')
+
+ def __str__(self):
+ if self.shape == ():
+ return self.to_string()
+ else:
+ fmt = {'all': lambda x: x.to_string()}
+ return np.array2string(self, formatter=fmt)
+
+ dc = np.arange(5).view(DuckCounter)
+ assert_equal(str(dc), "[zero one two many many]")
+ assert_equal(str(dc[0]), "zero")
+
+ def test_self_containing(self):
+ arr0d = np.array(None)
+ arr0d[()] = arr0d
+ assert_equal(repr(arr0d),
+ 'array(array(..., dtype=object), dtype=object)')
+ arr0d[()] = 0 # resolve recursion for garbage collector
+
+ arr1d = np.array([None, None])
+ arr1d[1] = arr1d
+ assert_equal(repr(arr1d),
+ 'array([None, array(..., dtype=object)], dtype=object)')
+ arr1d[1] = 0 # resolve recursion for garbage collector
+
+ first = np.array(None)
+ second = np.array(None)
+ first[()] = second
+ second[()] = first
+ assert_equal(repr(first),
+ 'array(array(array(..., dtype=object), dtype=object), dtype=object)')
+ first[()] = 0 # resolve circular references for garbage collector
+
+ def test_containing_list(self):
+ # printing square brackets directly would be ambiguous
+ arr1d = np.array([None, None])
+ arr1d[0] = [1, 2]
+ arr1d[1] = [3]
+ assert_equal(repr(arr1d),
+ 'array([list([1, 2]), list([3])], dtype=object)')
+
+ def test_void_scalar_recursion(self):
+ # gh-9345
+ repr(np.void(b'test')) # RecursionError ?
+
+ def test_fieldless_structured(self):
+ # gh-10366
+ no_fields = np.dtype([])
+ arr_no_fields = np.empty(4, dtype=no_fields)
+ assert_equal(repr(arr_no_fields), 'array([(), (), (), ()], dtype=[])')
+
+
+class TestComplexArray(object):
+ def test_str(self):
+ rvals = [0, 1, -1, np.inf, -np.inf, np.nan]
+ cvals = [complex(rp, ip) for rp in rvals for ip in rvals]
+ dtypes = [np.complex64, np.cdouble, np.clongdouble]
+ actual = [str(np.array([c], dt)) for c in cvals for dt in dtypes]
+ wanted = [
+ '[0.+0.j]', '[0.+0.j]', '[0.+0.j]',
+ '[0.+1.j]', '[0.+1.j]', '[0.+1.j]',
+ '[0.-1.j]', '[0.-1.j]', '[0.-1.j]',
+ '[0.+infj]', '[0.+infj]', '[0.+infj]',
+ '[0.-infj]', '[0.-infj]', '[0.-infj]',
+ '[0.+nanj]', '[0.+nanj]', '[0.+nanj]',
+ '[1.+0.j]', '[1.+0.j]', '[1.+0.j]',
+ '[1.+1.j]', '[1.+1.j]', '[1.+1.j]',
+ '[1.-1.j]', '[1.-1.j]', '[1.-1.j]',
+ '[1.+infj]', '[1.+infj]', '[1.+infj]',
+ '[1.-infj]', '[1.-infj]', '[1.-infj]',
+ '[1.+nanj]', '[1.+nanj]', '[1.+nanj]',
+ '[-1.+0.j]', '[-1.+0.j]', '[-1.+0.j]',
+ '[-1.+1.j]', '[-1.+1.j]', '[-1.+1.j]',
+ '[-1.-1.j]', '[-1.-1.j]', '[-1.-1.j]',
+ '[-1.+infj]', '[-1.+infj]', '[-1.+infj]',
+ '[-1.-infj]', '[-1.-infj]', '[-1.-infj]',
+ '[-1.+nanj]', '[-1.+nanj]', '[-1.+nanj]',
+ '[inf+0.j]', '[inf+0.j]', '[inf+0.j]',
+ '[inf+1.j]', '[inf+1.j]', '[inf+1.j]',
+ '[inf-1.j]', '[inf-1.j]', '[inf-1.j]',
+ '[inf+infj]', '[inf+infj]', '[inf+infj]',
+ '[inf-infj]', '[inf-infj]', '[inf-infj]',
+ '[inf+nanj]', '[inf+nanj]', '[inf+nanj]',
+ '[-inf+0.j]', '[-inf+0.j]', '[-inf+0.j]',
+ '[-inf+1.j]', '[-inf+1.j]', '[-inf+1.j]',
+ '[-inf-1.j]', '[-inf-1.j]', '[-inf-1.j]',
+ '[-inf+infj]', '[-inf+infj]', '[-inf+infj]',
+ '[-inf-infj]', '[-inf-infj]', '[-inf-infj]',
+ '[-inf+nanj]', '[-inf+nanj]', '[-inf+nanj]',
+ '[nan+0.j]', '[nan+0.j]', '[nan+0.j]',
+ '[nan+1.j]', '[nan+1.j]', '[nan+1.j]',
+ '[nan-1.j]', '[nan-1.j]', '[nan-1.j]',
+ '[nan+infj]', '[nan+infj]', '[nan+infj]',
+ '[nan-infj]', '[nan-infj]', '[nan-infj]',
+ '[nan+nanj]', '[nan+nanj]', '[nan+nanj]']
+
+ for res, val in zip(actual, wanted):
+ assert_equal(res, val)
+
+class TestArray2String(object):
+ def test_basic(self):
+ """Basic test of array2string."""
+ a = np.arange(3)
+ assert_(np.array2string(a) == '[0 1 2]')
+ assert_(np.array2string(a, max_line_width=4, legacy='1.13') == '[0 1\n 2]')
+ assert_(np.array2string(a, max_line_width=4) == '[0\n 1\n 2]')
+
+ def test_unexpected_kwarg(self):
+ # ensure that an appropriate TypeError
+ # is raised when array2string receives
+ # an unexpected kwarg
+
+ with assert_raises_regex(TypeError, 'nonsense'):
+ np.array2string(np.array([1, 2, 3]),
+ nonsense=None)
+
+ def test_format_function(self):
+ """Test custom format function for each element in array."""
+ def _format_function(x):
+ if np.abs(x) < 1:
+ return '.'
+ elif np.abs(x) < 2:
+ return 'o'
+ else:
+ return 'O'
+
+ x = np.arange(3)
+ if sys.version_info[0] >= 3:
+ x_hex = "[0x0 0x1 0x2]"
+ x_oct = "[0o0 0o1 0o2]"
+ else:
+ x_hex = "[0x0L 0x1L 0x2L]"
+ x_oct = "[0L 01L 02L]"
+ assert_(np.array2string(x, formatter={'all':_format_function}) ==
+ "[. o O]")
+ assert_(np.array2string(x, formatter={'int_kind':_format_function}) ==
+ "[. o O]")
+ assert_(np.array2string(x, formatter={'all':lambda x: "%.4f" % x}) ==
+ "[0.0000 1.0000 2.0000]")
+ assert_equal(np.array2string(x, formatter={'int':lambda x: hex(x)}),
+ x_hex)
+ assert_equal(np.array2string(x, formatter={'int':lambda x: oct(x)}),
+ x_oct)
+
+ x = np.arange(3.)
+ assert_(np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x}) ==
+ "[0.00 1.00 2.00]")
+ assert_(np.array2string(x, formatter={'float':lambda x: "%.2f" % x}) ==
+ "[0.00 1.00 2.00]")
+
+ s = np.array(['abc', 'def'])
+ assert_(np.array2string(s, formatter={'numpystr':lambda s: s*2}) ==
+ '[abcabc defdef]')
+
+ # check for backcompat that using FloatFormat works and emits warning
+ with assert_warns(DeprecationWarning):
+ fmt = np.core.arrayprint.FloatFormat(x, 9, 'maxprec', False)
+ assert_equal(np.array2string(x, formatter={'float_kind': fmt}),
+ '[0. 1. 2.]')
+
+ def test_structure_format(self):
+ dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
+ x = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt)
+ assert_equal(np.array2string(x),
+ "[('Sarah', [8., 7.]) ('John', [6., 7.])]")
+
+ np.set_printoptions(legacy='1.13')
+ try:
+ # for issue #5692
+ A = np.zeros(shape=10, dtype=[("A", "M8[s]")])
+ A[5:].fill(np.datetime64('NaT'))
+ assert_equal(
+ np.array2string(A),
+ textwrap.dedent("""\
+ [('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
+ ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('NaT',) ('NaT',)
+ ('NaT',) ('NaT',) ('NaT',)]""")
+ )
+ finally:
+ np.set_printoptions(legacy=False)
+
+ # same again, but with non-legacy behavior
+ assert_equal(
+ np.array2string(A),
+ textwrap.dedent("""\
+ [('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
+ ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
+ ('1970-01-01T00:00:00',) ( 'NaT',)
+ ( 'NaT',) ( 'NaT',)
+ ( 'NaT',) ( 'NaT',)]""")
+ )
+
+ # and again, with timedeltas
+ A = np.full(10, 123456, dtype=[("A", "m8[s]")])
+ A[5:].fill(np.datetime64('NaT'))
+ assert_equal(
+ np.array2string(A),
+ textwrap.dedent("""\
+ [(123456,) (123456,) (123456,) (123456,) (123456,) ( 'NaT',) ( 'NaT',)
+ ( 'NaT',) ( 'NaT',) ( 'NaT',)]""")
+ )
+
+ # See #8160
+ struct_int = np.array([([1, -1],), ([123, 1],)], dtype=[('B', 'i4', 2)])
+ assert_equal(np.array2string(struct_int),
+ "[([ 1, -1],) ([123, 1],)]")
+ struct_2dint = np.array([([[0, 1], [2, 3]],), ([[12, 0], [0, 0]],)],
+ dtype=[('B', 'i4', (2, 2))])
+ assert_equal(np.array2string(struct_2dint),
+ "[([[ 0, 1], [ 2, 3]],) ([[12, 0], [ 0, 0]],)]")
+
+ # See #8172
+ array_scalar = np.array(
+ (1., 2.1234567890123456789, 3.), dtype=('f8,f8,f8'))
+ assert_equal(np.array2string(array_scalar), "(1., 2.12345679, 3.)")
+
+ def test_unstructured_void_repr(self):
+ a = np.array([27, 91, 50, 75, 7, 65, 10, 8,
+ 27, 91, 51, 49,109, 82,101,100], dtype='u1').view('V8')
+ assert_equal(repr(a[0]), r"void(b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08')")
+ assert_equal(str(a[0]), r"b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'")
+ assert_equal(repr(a),
+ r"array([b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'," "\n"
+ r" b'\x1B\x5B\x33\x31\x6D\x52\x65\x64'], dtype='|V8')")
+
+ assert_equal(eval(repr(a), vars(np)), a)
+ assert_equal(eval(repr(a[0]), vars(np)), a[0])
+
+ def test_edgeitems_kwarg(self):
+ # previously the global print options would take precedence over the kwarg
+ arr = np.zeros(3, int)
+ assert_equal(
+ np.array2string(arr, edgeitems=1, threshold=0),
+ "[0 ... 0]"
+ )
+
+ def test_summarize_1d(self):
+ A = np.arange(1001)
+ strA = '[ 0 1 2 ... 998 999 1000]'
+ assert_equal(str(A), strA)
+
+ reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])'
+ assert_equal(repr(A), reprA)
+
+ def test_summarize_2d(self):
+ A = np.arange(1002).reshape(2, 501)
+ strA = '[[ 0 1 2 ... 498 499 500]\n' \
+ ' [ 501 502 503 ... 999 1000 1001]]'
+ assert_equal(str(A), strA)
+
+ reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \
+ ' [ 501, 502, 503, ..., 999, 1000, 1001]])'
+ assert_equal(repr(A), reprA)
+
+ def test_linewidth(self):
+ a = np.full(6, 1)
+
+ def make_str(a, width, **kw):
+ return np.array2string(a, separator="", max_line_width=width, **kw)
+
+ assert_equal(make_str(a, 8, legacy='1.13'), '[111111]')
+ assert_equal(make_str(a, 7, legacy='1.13'), '[111111]')
+ assert_equal(make_str(a, 5, legacy='1.13'), '[1111\n'
+ ' 11]')
+
+ assert_equal(make_str(a, 8), '[111111]')
+ assert_equal(make_str(a, 7), '[11111\n'
+ ' 1]')
+ assert_equal(make_str(a, 5), '[111\n'
+ ' 111]')
+
+ b = a[None,None,:]
+
+ assert_equal(make_str(b, 12, legacy='1.13'), '[[[111111]]]')
+ assert_equal(make_str(b, 9, legacy='1.13'), '[[[111111]]]')
+ assert_equal(make_str(b, 8, legacy='1.13'), '[[[11111\n'
+ ' 1]]]')
+
+ assert_equal(make_str(b, 12), '[[[111111]]]')
+ assert_equal(make_str(b, 9), '[[[111\n'
+ ' 111]]]')
+ assert_equal(make_str(b, 8), '[[[11\n'
+ ' 11\n'
+ ' 11]]]')
+
+ def test_wide_element(self):
+ a = np.array(['xxxxx'])
+ assert_equal(
+ np.array2string(a, max_line_width=5),
+ "['xxxxx']"
+ )
+ assert_equal(
+ np.array2string(a, max_line_width=5, legacy='1.13'),
+ "[ 'xxxxx']"
+ )
+
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+ def test_refcount(self):
+ # make sure we do not hold references to the array due to a recursive
+ # closure (gh-10620)
+ gc.disable()
+ a = np.arange(2)
+ r1 = sys.getrefcount(a)
+ np.array2string(a)
+ np.array2string(a)
+ r2 = sys.getrefcount(a)
+ gc.collect()
+ gc.enable()
+ assert_(r1 == r2)
+
+class TestPrintOptions(object):
+ """Test getting and setting global print options."""
+
+ def setup(self):
+ self.oldopts = np.get_printoptions()
+
+ def teardown(self):
+ np.set_printoptions(**self.oldopts)
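+    # Illustrative note: np.get_printoptions() returns a plain dict with keys
+    # such as 'precision', 'threshold', 'edgeitems', 'linewidth', 'suppress',
+    # 'formatter' and 'legacy', so saving it in setup() and splatting it back
+    # here restores every global printing option at once.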
+
+ def test_basic(self):
+ x = np.array([1.5, 0, 1.234567890])
+ assert_equal(repr(x), "array([1.5 , 0. , 1.23456789])")
+ np.set_printoptions(precision=4)
+ assert_equal(repr(x), "array([1.5 , 0. , 1.2346])")
+
+ def test_precision_zero(self):
+ np.set_printoptions(precision=0)
+ for values, string in (
+ ([0.], "0."), ([.3], "0."), ([-.3], "-0."), ([.7], "1."),
+ ([1.5], "2."), ([-1.5], "-2."), ([-15.34], "-15."),
+ ([100.], "100."), ([.2, -1, 122.51], " 0., -1., 123."),
+ ([0], "0"), ([-12], "-12"), ([complex(.3, -.7)], "0.-1.j")):
+ x = np.array(values)
+ assert_equal(repr(x), "array([%s])" % string)
+
+ def test_formatter(self):
+ x = np.arange(3)
+ np.set_printoptions(formatter={'all':lambda x: str(x-1)})
+ assert_equal(repr(x), "array([-1, 0, 1])")
+
+ def test_formatter_reset(self):
+ x = np.arange(3)
+ np.set_printoptions(formatter={'all':lambda x: str(x-1)})
+ assert_equal(repr(x), "array([-1, 0, 1])")
+ np.set_printoptions(formatter={'int':None})
+ assert_equal(repr(x), "array([0, 1, 2])")
+
+ np.set_printoptions(formatter={'all':lambda x: str(x-1)})
+ assert_equal(repr(x), "array([-1, 0, 1])")
+ np.set_printoptions(formatter={'all':None})
+ assert_equal(repr(x), "array([0, 1, 2])")
+
+ np.set_printoptions(formatter={'int':lambda x: str(x-1)})
+ assert_equal(repr(x), "array([-1, 0, 1])")
+ np.set_printoptions(formatter={'int_kind':None})
+ assert_equal(repr(x), "array([0, 1, 2])")
+
+ x = np.arange(3.)
+ np.set_printoptions(formatter={'float':lambda x: str(x-1)})
+ assert_equal(repr(x), "array([-1.0, 0.0, 1.0])")
+ np.set_printoptions(formatter={'float_kind':None})
+ assert_equal(repr(x), "array([0., 1., 2.])")
+
+ def test_0d_arrays(self):
+ unicode = type(u'')
+
+        assert_equal(unicode(np.array(u'café', '<U4')), u'café')
+
+        if sys.version_info[0] >= 3:
+            assert_equal(repr(np.array('café', '<U4')),
+                         "array('café', dtype='<U4')")
+        else:
+            assert_equal(repr(np.array(u'café', '<U4')),
+                         "array(u'caf\\xe9', dtype='<U4')")
+        styp = '<U4' if sys.version_info[0] >= 3 else '|S4'
+ assert_equal(repr(np.ones(3, dtype=styp)),
+ "array(['1', '1', '1'], dtype='{}')".format(styp))
+ assert_equal(repr(np.ones(12, dtype=styp)), textwrap.dedent("""\
+ array(['1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1'],
+ dtype='{}')""".format(styp)))
+
+ def test_linewidth_repr(self):
+ a = np.full(7, fill_value=2)
+ np.set_printoptions(linewidth=17)
+ assert_equal(
+ repr(a),
+ textwrap.dedent("""\
+ array([2, 2, 2,
+ 2, 2, 2,
+ 2])""")
+ )
+ np.set_printoptions(linewidth=17, legacy='1.13')
+ assert_equal(
+ repr(a),
+ textwrap.dedent("""\
+ array([2, 2, 2,
+ 2, 2, 2, 2])""")
+ )
+
+ a = np.full(8, fill_value=2)
+
+ np.set_printoptions(linewidth=18, legacy=False)
+ assert_equal(
+ repr(a),
+ textwrap.dedent("""\
+ array([2, 2, 2,
+ 2, 2, 2,
+ 2, 2])""")
+ )
+
+ np.set_printoptions(linewidth=18, legacy='1.13')
+ assert_equal(
+ repr(a),
+ textwrap.dedent("""\
+ array([2, 2, 2, 2,
+ 2, 2, 2, 2])""")
+ )
+
+ def test_linewidth_str(self):
+ a = np.full(18, fill_value=2)
+ np.set_printoptions(linewidth=18)
+ assert_equal(
+ str(a),
+ textwrap.dedent("""\
+ [2 2 2 2 2 2 2 2
+ 2 2 2 2 2 2 2 2
+ 2 2]""")
+ )
+ np.set_printoptions(linewidth=18, legacy='1.13')
+ assert_equal(
+ str(a),
+ textwrap.dedent("""\
+ [2 2 2 2 2 2 2 2 2
+ 2 2 2 2 2 2 2 2 2]""")
+ )
+
+ def test_edgeitems(self):
+ np.set_printoptions(edgeitems=1, threshold=1)
+ a = np.arange(27).reshape((3, 3, 3))
+ assert_equal(
+ repr(a),
+ textwrap.dedent("""\
+ array([[[ 0, ..., 2],
+ ...,
+ [ 6, ..., 8]],
+
+ ...,
+
+ [[18, ..., 20],
+ ...,
+ [24, ..., 26]]])""")
+ )
+
+ b = np.zeros((3, 3, 1, 1))
+ assert_equal(
+ repr(b),
+ textwrap.dedent("""\
+ array([[[[0.]],
+
+ ...,
+
+ [[0.]]],
+
+
+ ...,
+
+
+ [[[0.]],
+
+ ...,
+
+ [[0.]]]])""")
+ )
+
+ # 1.13 had extra trailing spaces, and was missing newlines
+ np.set_printoptions(legacy='1.13')
+
+ assert_equal(
+ repr(a),
+ textwrap.dedent("""\
+ array([[[ 0, ..., 2],
+ ...,
+ [ 6, ..., 8]],
+
+ ...,
+ [[18, ..., 20],
+ ...,
+ [24, ..., 26]]])""")
+ )
+
+ assert_equal(
+ repr(b),
+ textwrap.dedent("""\
+ array([[[[ 0.]],
+
+ ...,
+ [[ 0.]]],
+
+
+ ...,
+ [[[ 0.]],
+
+ ...,
+ [[ 0.]]]])""")
+ )
+
+ def test_bad_args(self):
+ assert_raises(ValueError, np.set_printoptions, threshold='nan')
+ assert_raises(ValueError, np.set_printoptions, threshold=u'1')
+ assert_raises(ValueError, np.set_printoptions, threshold=b'1')
+
+def test_unicode_object_array():
+ import sys
+ if sys.version_info[0] >= 3:
+ expected = "array(['é'], dtype=object)"
+ else:
+ expected = "array([u'\\xe9'], dtype=object)"
+ x = np.array([u'\xe9'], dtype=object)
+ assert_equal(repr(x), expected)
+
+
+class TestContextManager(object):
+ def test_ctx_mgr(self):
+        # test that the context manager actually works
+ with np.printoptions(precision=2):
+ s = str(np.array([2.0]) / 3)
+ assert_equal(s, '[0.67]')
+
+ def test_ctx_mgr_restores(self):
+        # test that print options are actually restored
+ opts = np.get_printoptions()
+ with np.printoptions(precision=opts['precision'] - 1,
+ linewidth=opts['linewidth'] - 4):
+ pass
+ assert_equal(np.get_printoptions(), opts)
+
+ def test_ctx_mgr_exceptions(self):
+ # test that print options are restored even if an exception is raised
+ opts = np.get_printoptions()
+ try:
+ with np.printoptions(precision=2, linewidth=11):
+ raise ValueError
+ except ValueError:
+ pass
+ assert_equal(np.get_printoptions(), opts)
+
+ def test_ctx_mgr_as_smth(self):
+ opts = {"precision": 2}
+ with np.printoptions(**opts) as ctx:
+ saved_opts = ctx.copy()
+ assert_equal({k: saved_opts[k] for k in opts}, opts)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_arrayprint.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_arrayprint.pyc
new file mode 100644
index 0000000..303f866
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_arrayprint.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_datetime.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_datetime.py
new file mode 100644
index 0000000..9832b42
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_datetime.py
@@ -0,0 +1,2222 @@
+from __future__ import division, absolute_import, print_function
+
+
+import numpy
+import numpy as np
+import datetime
+import pytest
+from numpy.testing import (
+ assert_, assert_equal, assert_raises, assert_warns, suppress_warnings,
+ assert_raises_regex,
+ )
+from numpy.core.numeric import pickle
+
+# Use pytz to test out various time zones if available
+try:
+ from pytz import timezone as tz
+ _has_pytz = True
+except ImportError:
+ _has_pytz = False
+
+try:
+ RecursionError
+except NameError:
+ RecursionError = RuntimeError # python < 3.5
+
+
+class TestDateTime(object):
+ def test_datetime_dtype_creation(self):
+ for unit in ['Y', 'M', 'W', 'D',
+ 'h', 'm', 's', 'ms', 'us',
+ 'ns', 'ps', 'fs', 'as']:
+ dt1 = np.dtype('M8[750%s]' % unit)
+ assert_(dt1 == np.dtype('datetime64[750%s]' % unit))
+ dt2 = np.dtype('m8[%s]' % unit)
+ assert_(dt2 == np.dtype('timedelta64[%s]' % unit))
+
+ # Generic units shouldn't add [] to the end
+ assert_equal(str(np.dtype("M8")), "datetime64")
+
+ # Should be possible to specify the endianness
+ assert_equal(np.dtype("=M8"), np.dtype("M8"))
+ assert_equal(np.dtype("=M8[s]"), np.dtype("M8[s]"))
+ assert_(np.dtype(">M8") == np.dtype("M8") or
+ np.dtype("M8[D]") == np.dtype("M8[D]") or
+ np.dtype("M8") != np.dtype("m8") == np.dtype("m8") or
+ np.dtype("m8[D]") == np.dtype("m8[D]") or
+ np.dtype("m8") != np.dtype(" Scalars
+ assert_equal(np.datetime64(b, '[s]'), np.datetime64('NaT', '[s]'))
+ assert_equal(np.datetime64(b, '[ms]'), np.datetime64('NaT', '[ms]'))
+ assert_equal(np.datetime64(b, '[M]'), np.datetime64('NaT', '[M]'))
+ assert_equal(np.datetime64(b, '[Y]'), np.datetime64('NaT', '[Y]'))
+ assert_equal(np.datetime64(b, '[W]'), np.datetime64('NaT', '[W]'))
+
+ # Arrays -> Scalars
+ assert_equal(np.datetime64(a, '[s]'), np.datetime64('NaT', '[s]'))
+ assert_equal(np.datetime64(a, '[ms]'), np.datetime64('NaT', '[ms]'))
+ assert_equal(np.datetime64(a, '[M]'), np.datetime64('NaT', '[M]'))
+ assert_equal(np.datetime64(a, '[Y]'), np.datetime64('NaT', '[Y]'))
+ assert_equal(np.datetime64(a, '[W]'), np.datetime64('NaT', '[W]'))
+
+ def test_days_creation(self):
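+        # Illustrative note: the expected epoch offsets below count 365 days
+        # per year plus one day per leap year. Leap years are counted relative
+        # to 1972 (the first leap year after the 1970 epoch), and the "+ 3"
+        # terms for pre-1970 dates compensate for 1700, 1800 and 1900 not
+        # being leap years.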
+        assert_equal(np.array('1599', dtype='M8[D]').astype('i8'),
+                     (1600-1970)*365 - (1972-1600)//4 + 3 - 365)
+        assert_equal(np.array('1600', dtype='M8[D]').astype('i8'),
+                     (1600-1970)*365 - (1972-1600)//4 + 3)
+        assert_equal(np.array('1601', dtype='M8[D]').astype('i8'),
+                     (1600-1970)*365 - (1972-1600)//4 + 3 + 366)
+ assert_equal(np.array('1900', dtype='M8[D]').astype('i8'),
+ (1900-1970)*365 - (1970-1900)//4)
+ assert_equal(np.array('1901', dtype='M8[D]').astype('i8'),
+ (1900-1970)*365 - (1970-1900)//4 + 365)
+ assert_equal(np.array('1967', dtype='M8[D]').astype('i8'), -3*365 - 1)
+ assert_equal(np.array('1968', dtype='M8[D]').astype('i8'), -2*365 - 1)
+ assert_equal(np.array('1969', dtype='M8[D]').astype('i8'), -1*365)
+ assert_equal(np.array('1970', dtype='M8[D]').astype('i8'), 0*365)
+ assert_equal(np.array('1971', dtype='M8[D]').astype('i8'), 1*365)
+ assert_equal(np.array('1972', dtype='M8[D]').astype('i8'), 2*365)
+ assert_equal(np.array('1973', dtype='M8[D]').astype('i8'), 3*365 + 1)
+ assert_equal(np.array('1974', dtype='M8[D]').astype('i8'), 4*365 + 1)
+ assert_equal(np.array('2000', dtype='M8[D]').astype('i8'),
+ (2000 - 1970)*365 + (2000 - 1972)//4)
+ assert_equal(np.array('2001', dtype='M8[D]').astype('i8'),
+ (2000 - 1970)*365 + (2000 - 1972)//4 + 366)
+ assert_equal(np.array('2400', dtype='M8[D]').astype('i8'),
+ (2400 - 1970)*365 + (2400 - 1972)//4 - 3)
+ assert_equal(np.array('2401', dtype='M8[D]').astype('i8'),
+ (2400 - 1970)*365 + (2400 - 1972)//4 - 3 + 366)
+
+ assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('i8'),
+ (1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 28)
+ assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('i8'),
+ (1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 29)
+ assert_equal(np.array('2000-02-29', dtype='M8[D]').astype('i8'),
+ (2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 28)
+ assert_equal(np.array('2000-03-01', dtype='M8[D]').astype('i8'),
+ (2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 29)
+ assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('i8'),
+ (2000 - 1970)*365 + (2000 - 1972)//4 + 366 + 31 + 28 + 21)
+
+ def test_days_to_pydate(self):
+ assert_equal(np.array('1599', dtype='M8[D]').astype('O'),
+ datetime.date(1599, 1, 1))
+ assert_equal(np.array('1600', dtype='M8[D]').astype('O'),
+ datetime.date(1600, 1, 1))
+ assert_equal(np.array('1601', dtype='M8[D]').astype('O'),
+ datetime.date(1601, 1, 1))
+ assert_equal(np.array('1900', dtype='M8[D]').astype('O'),
+ datetime.date(1900, 1, 1))
+ assert_equal(np.array('1901', dtype='M8[D]').astype('O'),
+ datetime.date(1901, 1, 1))
+ assert_equal(np.array('2000', dtype='M8[D]').astype('O'),
+ datetime.date(2000, 1, 1))
+ assert_equal(np.array('2001', dtype='M8[D]').astype('O'),
+ datetime.date(2001, 1, 1))
+ assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('O'),
+ datetime.date(1600, 2, 29))
+ assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('O'),
+ datetime.date(1600, 3, 1))
+ assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('O'),
+ datetime.date(2001, 3, 22))
+
+ def test_dtype_comparison(self):
+ assert_(not (np.dtype('M8[us]') == np.dtype('M8[ms]')))
+ assert_(np.dtype('M8[us]') != np.dtype('M8[ms]'))
+ assert_(np.dtype('M8[2D]') != np.dtype('M8[D]'))
+ assert_(np.dtype('M8[D]') != np.dtype('M8[2D]'))
+
+ def test_pydatetime_creation(self):
+ a = np.array(['1960-03-12', datetime.date(1960, 3, 12)], dtype='M8[D]')
+ assert_equal(a[0], a[1])
+ a = np.array(['1999-12-31', datetime.date(1999, 12, 31)], dtype='M8[D]')
+ assert_equal(a[0], a[1])
+ a = np.array(['2000-01-01', datetime.date(2000, 1, 1)], dtype='M8[D]')
+ assert_equal(a[0], a[1])
+        # Will fail if the date changes at exactly the wrong moment
+ a = np.array(['today', datetime.date.today()], dtype='M8[D]')
+ assert_equal(a[0], a[1])
+ # datetime.datetime.now() returns local time, not UTC
+ #a = np.array(['now', datetime.datetime.now()], dtype='M8[s]')
+ #assert_equal(a[0], a[1])
+
+ # we can give a datetime.date time units
+ assert_equal(np.array(datetime.date(1960, 3, 12), dtype='M8[s]'),
+ np.array(np.datetime64('1960-03-12T00:00:00')))
+
+ def test_datetime_string_conversion(self):
+ a = ['2011-03-16', '1920-01-01', '2013-05-19']
+ str_a = np.array(a, dtype='S')
+ uni_a = np.array(a, dtype='U')
+ dt_a = np.array(a, dtype='M')
+
+ # String to datetime
+ assert_equal(dt_a, str_a.astype('M'))
+ assert_equal(dt_a.dtype, str_a.astype('M').dtype)
+ dt_b = np.empty_like(dt_a)
+ dt_b[...] = str_a
+ assert_equal(dt_a, dt_b)
+
+ # Datetime to string
+ assert_equal(str_a, dt_a.astype('S0'))
+ str_b = np.empty_like(str_a)
+ str_b[...] = dt_a
+ assert_equal(str_a, str_b)
+
+ # Unicode to datetime
+ assert_equal(dt_a, uni_a.astype('M'))
+ assert_equal(dt_a.dtype, uni_a.astype('M').dtype)
+ dt_b = np.empty_like(dt_a)
+ dt_b[...] = uni_a
+ assert_equal(dt_a, dt_b)
+
+ # Datetime to unicode
+ assert_equal(uni_a, dt_a.astype('U'))
+ uni_b = np.empty_like(uni_a)
+ uni_b[...] = dt_a
+ assert_equal(uni_a, uni_b)
+
+ # Datetime to long string - gh-9712
+ assert_equal(str_a, dt_a.astype((np.string_, 128)))
+ str_b = np.empty(str_a.shape, dtype=(np.string_, 128))
+ str_b[...] = dt_a
+ assert_equal(str_a, str_b)
+
+ def test_datetime_array_str(self):
+ a = np.array(['2011-03-16', '1920-01-01', '2013-05-19'], dtype='M')
+ assert_equal(str(a), "['2011-03-16' '1920-01-01' '2013-05-19']")
+
+ a = np.array(['2011-03-16T13:55', '1920-01-01T03:12'], dtype='M')
+ assert_equal(np.array2string(a, separator=', ',
+ formatter={'datetime': lambda x:
+ "'%s'" % np.datetime_as_string(x, timezone='UTC')}),
+ "['2011-03-16T13:55Z', '1920-01-01T03:12Z']")
+
+ # Check that one NaT doesn't corrupt subsequent entries
+ a = np.array(['2010', 'NaT', '2030']).astype('M')
+ assert_equal(str(a), "['2010' 'NaT' '2030']")
+
+ def test_timedelta_array_str(self):
+ a = np.array([-1, 0, 100], dtype='m')
+ assert_equal(str(a), "[ -1 0 100]")
+ a = np.array(['NaT', 'NaT'], dtype='m')
+ assert_equal(str(a), "['NaT' 'NaT']")
+ # Check right-alignment with NaTs
+ a = np.array([-1, 'NaT', 0], dtype='m')
+ assert_equal(str(a), "[ -1 'NaT' 0]")
+ a = np.array([-1, 'NaT', 1234567], dtype='m')
+ assert_equal(str(a), "[ -1 'NaT' 1234567]")
+
+ # Test with other byteorder:
+ a = np.array([-1, 'NaT', 1234567], dtype='>m')
+ assert_equal(str(a), "[ -1 'NaT' 1234567]")
+        a = np.array([-1, 'NaT', 1234567], dtype='<m')
+        assert_equal(str(a), "[ -1 'NaT' 1234567]")
+
+    def test_pickle(self):
+        # Check that loading pickles from 1.6 works
+        pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \
+              b"(I4\nS'>'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'us'\np6\n" + \
+              b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb."
+        assert_equal(pickle.loads(pkl), np.dtype('>M8[us]'))
+
+ def test_setstate(self):
+ "Verify that datetime dtype __setstate__ can handle bad arguments"
+ dt = np.dtype('>M8[us]')
+ assert_raises(ValueError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, 1))
+ assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2])
+ assert_raises(TypeError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, ({}, 'xxx')))
+ assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2])
+
+ def test_dtype_promotion(self):
+ # datetime datetime computes the metadata gcd
+ # timedelta timedelta computes the metadata gcd
+ for mM in ['m', 'M']:
+ assert_equal(
+ np.promote_types(np.dtype(mM+'8[2Y]'), np.dtype(mM+'8[2Y]')),
+ np.dtype(mM+'8[2Y]'))
+ assert_equal(
+ np.promote_types(np.dtype(mM+'8[12Y]'), np.dtype(mM+'8[15Y]')),
+ np.dtype(mM+'8[3Y]'))
+ assert_equal(
+ np.promote_types(np.dtype(mM+'8[62M]'), np.dtype(mM+'8[24M]')),
+ np.dtype(mM+'8[2M]'))
+ assert_equal(
+ np.promote_types(np.dtype(mM+'8[1W]'), np.dtype(mM+'8[2D]')),
+ np.dtype(mM+'8[1D]'))
+ assert_equal(
+ np.promote_types(np.dtype(mM+'8[W]'), np.dtype(mM+'8[13s]')),
+ np.dtype(mM+'8[s]'))
+ assert_equal(
+ np.promote_types(np.dtype(mM+'8[13W]'), np.dtype(mM+'8[49s]')),
+ np.dtype(mM+'8[7s]'))
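+            # Illustrative note: the unit gcd is taken on the lengths in
+            # seconds, e.g. a week is 604800 s, so gcd(13*604800 s, 49 s)
+            # is 7 s because 604800 is divisible by 7 while 49 == 7*7.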
+ # timedelta timedelta raises when there is no reasonable gcd
+ assert_raises(TypeError, np.promote_types,
+ np.dtype('m8[Y]'), np.dtype('m8[D]'))
+ assert_raises(TypeError, np.promote_types,
+ np.dtype('m8[M]'), np.dtype('m8[W]'))
+ # timedelta timedelta may overflow with big unit ranges
+ assert_raises(OverflowError, np.promote_types,
+ np.dtype('m8[W]'), np.dtype('m8[fs]'))
+ assert_raises(OverflowError, np.promote_types,
+ np.dtype('m8[s]'), np.dtype('m8[as]'))
+
+    def test_cast_overflow(self):
+        # gh-4486
+        def cast():
+            numpy.datetime64("1971-01-01 00:00:00.000000000000000").astype("<M8[D]")
+        assert_raises(OverflowError, cast)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_defchararray.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_defchararray.py
new file mode 100644
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_defchararray.py
+from __future__ import division, absolute_import, print_function
+
+import sys
+
+import numpy as np
+from numpy.testing import (
+    assert_, assert_equal, assert_array_equal, assert_raises,
+    assert_raises_regex, suppress_warnings,
+    )
+
+class TestWhitespace(object):
+    def setup(self):
+        self.A = np.array([['abc ', '123  '],
+                           ['789 ', 'xyz ']]).view(np.chararray)
+        self.B = np.array([['abc', '123'],
+                           ['789', 'xyz']]).view(np.chararray)
+
+    def test1(self):
+        assert_(np.all(self.A == self.B))
+        assert_(np.all(self.A >= self.B))
+ assert_(np.all(self.A <= self.B))
+ assert_(not np.any(self.A > self.B))
+ assert_(not np.any(self.A < self.B))
+ assert_(not np.any(self.A != self.B))
+
+class TestChar(object):
+ def setup(self):
+ self.A = np.array('abc1', dtype='c').view(np.chararray)
+
+ def test_it(self):
+ assert_equal(self.A.shape, (4,))
+ assert_equal(self.A.upper()[:2].tobytes(), b'AB')
+
+class TestComparisons(object):
+ def setup(self):
+ self.A = np.array([['abc', '123'],
+ ['789', 'xyz']]).view(np.chararray)
+ self.B = np.array([['efg', '123 '],
+ ['051', 'tuv']]).view(np.chararray)
+
+ def test_not_equal(self):
+ assert_array_equal((self.A != self.B), [[True, False], [True, True]])
+
+ def test_equal(self):
+ assert_array_equal((self.A == self.B), [[False, True], [False, False]])
+
+ def test_greater_equal(self):
+ assert_array_equal((self.A >= self.B), [[False, True], [True, True]])
+
+ def test_less_equal(self):
+ assert_array_equal((self.A <= self.B), [[True, True], [False, False]])
+
+ def test_greater(self):
+ assert_array_equal((self.A > self.B), [[False, False], [True, True]])
+
+ def test_less(self):
+ assert_array_equal((self.A < self.B), [[True, False], [False, False]])
+
+class TestComparisonsMixed1(TestComparisons):
+ """Ticket #1276"""
+
+ def setup(self):
+ TestComparisons.setup(self)
+ self.B = np.array([['efg', '123 '],
+ ['051', 'tuv']], np.unicode_).view(np.chararray)
+
+class TestComparisonsMixed2(TestComparisons):
+ """Ticket #1276"""
+
+ def setup(self):
+ TestComparisons.setup(self)
+ self.A = np.array([['abc', '123'],
+ ['789', 'xyz']], np.unicode_).view(np.chararray)
+
+class TestInformation(object):
+ def setup(self):
+ self.A = np.array([[' abc ', ''],
+ ['12345', 'MixedCase'],
+ ['123 \t 345 \0 ', 'UPPER']]).view(np.chararray)
+ self.B = np.array([[u' \u03a3 ', u''],
+ [u'12345', u'MixedCase'],
+ [u'123 \t 345 \0 ', u'UPPER']]).view(np.chararray)
+
+ def test_len(self):
+ assert_(issubclass(np.char.str_len(self.A).dtype.type, np.integer))
+ assert_array_equal(np.char.str_len(self.A), [[5, 0], [5, 9], [12, 5]])
+ assert_array_equal(np.char.str_len(self.B), [[3, 0], [5, 9], [12, 5]])
+
+ def test_count(self):
+ assert_(issubclass(self.A.count('').dtype.type, np.integer))
+ assert_array_equal(self.A.count('a'), [[1, 0], [0, 1], [0, 0]])
+ assert_array_equal(self.A.count('123'), [[0, 0], [1, 0], [1, 0]])
+ # Python doesn't seem to like counting NULL characters
+ # assert_array_equal(self.A.count('\0'), [[0, 0], [0, 0], [1, 0]])
+ assert_array_equal(self.A.count('a', 0, 2), [[1, 0], [0, 0], [0, 0]])
+ assert_array_equal(self.B.count('a'), [[0, 0], [0, 1], [0, 0]])
+ assert_array_equal(self.B.count('123'), [[0, 0], [1, 0], [1, 0]])
+ # assert_array_equal(self.B.count('\0'), [[0, 0], [0, 0], [1, 0]])
+
+ def test_endswith(self):
+ assert_(issubclass(self.A.endswith('').dtype.type, np.bool_))
+ assert_array_equal(self.A.endswith(' '), [[1, 0], [0, 0], [1, 0]])
+ assert_array_equal(self.A.endswith('3', 0, 3), [[0, 0], [1, 0], [1, 0]])
+
+ def fail():
+ self.A.endswith('3', 'fdjk')
+
+ assert_raises(TypeError, fail)
+
+ def test_find(self):
+ assert_(issubclass(self.A.find('a').dtype.type, np.integer))
+ assert_array_equal(self.A.find('a'), [[1, -1], [-1, 6], [-1, -1]])
+ assert_array_equal(self.A.find('3'), [[-1, -1], [2, -1], [2, -1]])
+ assert_array_equal(self.A.find('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]])
+ assert_array_equal(self.A.find(['1', 'P']), [[-1, -1], [0, -1], [0, 1]])
+
+ def test_index(self):
+
+ def fail():
+ self.A.index('a')
+
+ assert_raises(ValueError, fail)
+ assert_(np.char.index('abcba', 'b') == 1)
+ assert_(issubclass(np.char.index('abcba', 'b').dtype.type, np.integer))
+
+ def test_isalnum(self):
+ assert_(issubclass(self.A.isalnum().dtype.type, np.bool_))
+ assert_array_equal(self.A.isalnum(), [[False, False], [True, True], [False, True]])
+
+ def test_isalpha(self):
+ assert_(issubclass(self.A.isalpha().dtype.type, np.bool_))
+ assert_array_equal(self.A.isalpha(), [[False, False], [False, True], [False, True]])
+
+ def test_isdigit(self):
+ assert_(issubclass(self.A.isdigit().dtype.type, np.bool_))
+ assert_array_equal(self.A.isdigit(), [[False, False], [True, False], [False, False]])
+
+ def test_islower(self):
+ assert_(issubclass(self.A.islower().dtype.type, np.bool_))
+ assert_array_equal(self.A.islower(), [[True, False], [False, False], [False, False]])
+
+ def test_isspace(self):
+ assert_(issubclass(self.A.isspace().dtype.type, np.bool_))
+ assert_array_equal(self.A.isspace(), [[False, False], [False, False], [False, False]])
+
+ def test_istitle(self):
+ assert_(issubclass(self.A.istitle().dtype.type, np.bool_))
+ assert_array_equal(self.A.istitle(), [[False, False], [False, False], [False, False]])
+
+ def test_isupper(self):
+ assert_(issubclass(self.A.isupper().dtype.type, np.bool_))
+ assert_array_equal(self.A.isupper(), [[False, False], [False, False], [False, True]])
+
+ def test_rfind(self):
+ assert_(issubclass(self.A.rfind('a').dtype.type, np.integer))
+ assert_array_equal(self.A.rfind('a'), [[1, -1], [-1, 6], [-1, -1]])
+ assert_array_equal(self.A.rfind('3'), [[-1, -1], [2, -1], [6, -1]])
+ assert_array_equal(self.A.rfind('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]])
+ assert_array_equal(self.A.rfind(['1', 'P']), [[-1, -1], [0, -1], [0, 2]])
+
+ def test_rindex(self):
+
+ def fail():
+ self.A.rindex('a')
+
+ assert_raises(ValueError, fail)
+ assert_(np.char.rindex('abcba', 'b') == 3)
+ assert_(issubclass(np.char.rindex('abcba', 'b').dtype.type, np.integer))
+
+ def test_startswith(self):
+ assert_(issubclass(self.A.startswith('').dtype.type, np.bool_))
+ assert_array_equal(self.A.startswith(' '), [[1, 0], [0, 0], [0, 0]])
+ assert_array_equal(self.A.startswith('1', 0, 3), [[0, 0], [1, 0], [1, 0]])
+
+ def fail():
+ self.A.startswith('3', 'fdjk')
+
+ assert_raises(TypeError, fail)
+
+
+class TestMethods(object):
+ def setup(self):
+ self.A = np.array([[' abc ', ''],
+ ['12345', 'MixedCase'],
+ ['123 \t 345 \0 ', 'UPPER']],
+ dtype='S').view(np.chararray)
+ self.B = np.array([[u' \u03a3 ', u''],
+ [u'12345', u'MixedCase'],
+ [u'123 \t 345 \0 ', u'UPPER']]).view(np.chararray)
+
+ def test_capitalize(self):
+ tgt = [[b' abc ', b''],
+ [b'12345', b'Mixedcase'],
+ [b'123 \t 345 \0 ', b'Upper']]
+ assert_(issubclass(self.A.capitalize().dtype.type, np.string_))
+ assert_array_equal(self.A.capitalize(), tgt)
+
+ tgt = [[u' \u03c3 ', ''],
+ ['12345', 'Mixedcase'],
+ ['123 \t 345 \0 ', 'Upper']]
+ assert_(issubclass(self.B.capitalize().dtype.type, np.unicode_))
+ assert_array_equal(self.B.capitalize(), tgt)
+
+ def test_center(self):
+ assert_(issubclass(self.A.center(10).dtype.type, np.string_))
+ C = self.A.center([10, 20])
+ assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])
+
+ C = self.A.center(20, b'#')
+ assert_(np.all(C.startswith(b'#')))
+ assert_(np.all(C.endswith(b'#')))
+
+ C = np.char.center(b'FOO', [[10, 20], [15, 8]])
+ tgt = [[b' FOO ', b' FOO '],
+ [b' FOO ', b' FOO ']]
+ assert_(issubclass(C.dtype.type, np.string_))
+ assert_array_equal(C, tgt)
+
+ def test_decode(self):
+ if sys.version_info[0] >= 3:
+ A = np.char.array([b'\\u03a3'])
+ assert_(A.decode('unicode-escape')[0] == '\u03a3')
+ else:
+ with suppress_warnings() as sup:
+ if sys.py3kwarning:
+ sup.filter(DeprecationWarning, "'hex_codec'")
+ A = np.char.array(['736563726574206d657373616765'])
+ assert_(A.decode('hex_codec')[0] == 'secret message')
+
+ def test_encode(self):
+ B = self.B.encode('unicode_escape')
+ assert_(B[0][0] == str(' \\u03a3 ').encode('latin1'))
+
+ def test_expandtabs(self):
+ T = self.A.expandtabs()
+ assert_(T[2, 0] == b'123 345 \0')
+
+ def test_join(self):
+ if sys.version_info[0] >= 3:
+ # NOTE: list(b'123') == [49, 50, 51]
+ # so that b','.join(b'123') results to an error on Py3
+ A0 = self.A.decode('ascii')
+ else:
+ A0 = self.A
+
+ A = np.char.join([',', '#'], A0)
+ if sys.version_info[0] >= 3:
+ assert_(issubclass(A.dtype.type, np.unicode_))
+ else:
+ assert_(issubclass(A.dtype.type, np.string_))
+ tgt = np.array([[' ,a,b,c, ', ''],
+ ['1,2,3,4,5', 'M#i#x#e#d#C#a#s#e'],
+ ['1,2,3, ,\t, ,3,4,5, ,\x00, ', 'U#P#P#E#R']])
+ assert_array_equal(np.char.join([',', '#'], A0), tgt)
+
+ def test_ljust(self):
+ assert_(issubclass(self.A.ljust(10).dtype.type, np.string_))
+
+ C = self.A.ljust([10, 20])
+ assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])
+
+ C = self.A.ljust(20, b'#')
+ assert_array_equal(C.startswith(b'#'), [
+ [False, True], [False, False], [False, False]])
+ assert_(np.all(C.endswith(b'#')))
+
+ C = np.char.ljust(b'FOO', [[10, 20], [15, 8]])
+ tgt = [[b'FOO ', b'FOO '],
+ [b'FOO ', b'FOO ']]
+ assert_(issubclass(C.dtype.type, np.string_))
+ assert_array_equal(C, tgt)
+
+ def test_lower(self):
+ tgt = [[b' abc ', b''],
+ [b'12345', b'mixedcase'],
+ [b'123 \t 345 \0 ', b'upper']]
+ assert_(issubclass(self.A.lower().dtype.type, np.string_))
+ assert_array_equal(self.A.lower(), tgt)
+
+ tgt = [[u' \u03c3 ', u''],
+ [u'12345', u'mixedcase'],
+ [u'123 \t 345 \0 ', u'upper']]
+ assert_(issubclass(self.B.lower().dtype.type, np.unicode_))
+ assert_array_equal(self.B.lower(), tgt)
+
+ def test_lstrip(self):
+ tgt = [[b'abc ', b''],
+ [b'12345', b'MixedCase'],
+ [b'123 \t 345 \0 ', b'UPPER']]
+ assert_(issubclass(self.A.lstrip().dtype.type, np.string_))
+ assert_array_equal(self.A.lstrip(), tgt)
+
+ tgt = [[b' abc', b''],
+ [b'2345', b'ixedCase'],
+ [b'23 \t 345 \x00', b'UPPER']]
+ assert_array_equal(self.A.lstrip([b'1', b'M']), tgt)
+
+ tgt = [[u'\u03a3 ', ''],
+ ['12345', 'MixedCase'],
+ ['123 \t 345 \0 ', 'UPPER']]
+ assert_(issubclass(self.B.lstrip().dtype.type, np.unicode_))
+ assert_array_equal(self.B.lstrip(), tgt)
+
+ def test_partition(self):
+ P = self.A.partition([b'3', b'M'])
+ tgt = [[(b' abc ', b'', b''), (b'', b'', b'')],
+ [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')],
+ [(b'12', b'3', b' \t 345 \0 '), (b'UPPER', b'', b'')]]
+ assert_(issubclass(P.dtype.type, np.string_))
+ assert_array_equal(P, tgt)
+
+ def test_replace(self):
+ R = self.A.replace([b'3', b'a'],
+ [b'##########', b'@'])
+ tgt = [[b' abc ', b''],
+ [b'12##########45', b'MixedC@se'],
+ [b'12########## \t ##########45 \x00', b'UPPER']]
+ assert_(issubclass(R.dtype.type, np.string_))
+ assert_array_equal(R, tgt)
+
+ if sys.version_info[0] < 3:
+ # NOTE: b'abc'.replace(b'a', 'b') is not allowed on Py3
+ R = self.A.replace(b'a', u'\u03a3')
+ tgt = [[u' \u03a3bc ', ''],
+ ['12345', u'MixedC\u03a3se'],
+ ['123 \t 345 \x00', 'UPPER']]
+ assert_(issubclass(R.dtype.type, np.unicode_))
+ assert_array_equal(R, tgt)
+
+ def test_rjust(self):
+ assert_(issubclass(self.A.rjust(10).dtype.type, np.string_))
+
+ C = self.A.rjust([10, 20])
+ assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])
+
+ C = self.A.rjust(20, b'#')
+ assert_(np.all(C.startswith(b'#')))
+ assert_array_equal(C.endswith(b'#'),
+ [[False, True], [False, False], [False, False]])
+
+ C = np.char.rjust(b'FOO', [[10, 20], [15, 8]])
+ tgt = [[b' FOO', b' FOO'],
+ [b' FOO', b' FOO']]
+ assert_(issubclass(C.dtype.type, np.string_))
+ assert_array_equal(C, tgt)
+
+ def test_rpartition(self):
+ P = self.A.rpartition([b'3', b'M'])
+ tgt = [[(b'', b'', b' abc '), (b'', b'', b'')],
+ [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')],
+ [(b'123 \t ', b'3', b'45 \0 '), (b'', b'', b'UPPER')]]
+ assert_(issubclass(P.dtype.type, np.string_))
+ assert_array_equal(P, tgt)
+
+ def test_rsplit(self):
+ A = self.A.rsplit(b'3')
+ tgt = [[[b' abc '], [b'']],
+ [[b'12', b'45'], [b'MixedCase']],
+ [[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]]
+ assert_(issubclass(A.dtype.type, np.object_))
+ assert_equal(A.tolist(), tgt)
+
+ def test_rstrip(self):
+ assert_(issubclass(self.A.rstrip().dtype.type, np.string_))
+
+ tgt = [[b' abc', b''],
+ [b'12345', b'MixedCase'],
+ [b'123 \t 345', b'UPPER']]
+ assert_array_equal(self.A.rstrip(), tgt)
+
+ tgt = [[b' abc ', b''],
+ [b'1234', b'MixedCase'],
+ [b'123 \t 345 \x00', b'UPP']
+ ]
+ assert_array_equal(self.A.rstrip([b'5', b'ER']), tgt)
+
+ tgt = [[u' \u03a3', ''],
+ ['12345', 'MixedCase'],
+ ['123 \t 345', 'UPPER']]
+ assert_(issubclass(self.B.rstrip().dtype.type, np.unicode_))
+ assert_array_equal(self.B.rstrip(), tgt)
+
+ def test_strip(self):
+ tgt = [[b'abc', b''],
+ [b'12345', b'MixedCase'],
+ [b'123 \t 345', b'UPPER']]
+ assert_(issubclass(self.A.strip().dtype.type, np.string_))
+ assert_array_equal(self.A.strip(), tgt)
+
+ tgt = [[b' abc ', b''],
+ [b'234', b'ixedCas'],
+ [b'23 \t 345 \x00', b'UPP']]
+ assert_array_equal(self.A.strip([b'15', b'EReM']), tgt)
+
+ tgt = [[u'\u03a3', ''],
+ ['12345', 'MixedCase'],
+ ['123 \t 345', 'UPPER']]
+ assert_(issubclass(self.B.strip().dtype.type, np.unicode_))
+ assert_array_equal(self.B.strip(), tgt)
+
+ def test_split(self):
+ A = self.A.split(b'3')
+ tgt = [
+ [[b' abc '], [b'']],
+ [[b'12', b'45'], [b'MixedCase']],
+ [[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]]
+ assert_(issubclass(A.dtype.type, np.object_))
+ assert_equal(A.tolist(), tgt)
+
+ def test_splitlines(self):
+ A = np.char.array(['abc\nfds\nwer']).splitlines()
+ assert_(issubclass(A.dtype.type, np.object_))
+ assert_(A.shape == (1,))
+ assert_(len(A[0]) == 3)
+
+ def test_swapcase(self):
+ tgt = [[b' ABC ', b''],
+ [b'12345', b'mIXEDcASE'],
+ [b'123 \t 345 \0 ', b'upper']]
+ assert_(issubclass(self.A.swapcase().dtype.type, np.string_))
+ assert_array_equal(self.A.swapcase(), tgt)
+
+ tgt = [[u' \u03c3 ', u''],
+ [u'12345', u'mIXEDcASE'],
+ [u'123 \t 345 \0 ', u'upper']]
+ assert_(issubclass(self.B.swapcase().dtype.type, np.unicode_))
+ assert_array_equal(self.B.swapcase(), tgt)
+
+ def test_title(self):
+ tgt = [[b' Abc ', b''],
+ [b'12345', b'Mixedcase'],
+ [b'123 \t 345 \0 ', b'Upper']]
+ assert_(issubclass(self.A.title().dtype.type, np.string_))
+ assert_array_equal(self.A.title(), tgt)
+
+ tgt = [[u' \u03a3 ', u''],
+ [u'12345', u'Mixedcase'],
+ [u'123 \t 345 \0 ', u'Upper']]
+ assert_(issubclass(self.B.title().dtype.type, np.unicode_))
+ assert_array_equal(self.B.title(), tgt)
+
+ def test_upper(self):
+ tgt = [[b' ABC ', b''],
+ [b'12345', b'MIXEDCASE'],
+ [b'123 \t 345 \0 ', b'UPPER']]
+ assert_(issubclass(self.A.upper().dtype.type, np.string_))
+ assert_array_equal(self.A.upper(), tgt)
+
+ tgt = [[u' \u03a3 ', u''],
+ [u'12345', u'MIXEDCASE'],
+ [u'123 \t 345 \0 ', u'UPPER']]
+ assert_(issubclass(self.B.upper().dtype.type, np.unicode_))
+ assert_array_equal(self.B.upper(), tgt)
+
+ def test_isnumeric(self):
+
+ def fail():
+ self.A.isnumeric()
+
+ assert_raises(TypeError, fail)
+ assert_(issubclass(self.B.isnumeric().dtype.type, np.bool_))
+ assert_array_equal(self.B.isnumeric(), [
+ [False, False], [True, False], [False, False]])
+
+ def test_isdecimal(self):
+
+ def fail():
+ self.A.isdecimal()
+
+ assert_raises(TypeError, fail)
+ assert_(issubclass(self.B.isdecimal().dtype.type, np.bool_))
+ assert_array_equal(self.B.isdecimal(), [
+ [False, False], [True, False], [False, False]])
+
+
+class TestOperations(object):
+ def setup(self):
+ self.A = np.array([['abc', '123'],
+ ['789', 'xyz']]).view(np.chararray)
+ self.B = np.array([['efg', '456'],
+ ['051', 'tuv']]).view(np.chararray)
+
+ def test_add(self):
+ AB = np.array([['abcefg', '123456'],
+ ['789051', 'xyztuv']]).view(np.chararray)
+ assert_array_equal(AB, (self.A + self.B))
+ assert_(len((self.A + self.B)[0][0]) == 6)
+
+ def test_radd(self):
+ QA = np.array([['qabc', 'q123'],
+ ['q789', 'qxyz']]).view(np.chararray)
+ assert_array_equal(QA, ('q' + self.A))
+
+ def test_mul(self):
+ A = self.A
+ for r in (2, 3, 5, 7, 197):
+ Ar = np.array([[A[0, 0]*r, A[0, 1]*r],
+ [A[1, 0]*r, A[1, 1]*r]]).view(np.chararray)
+
+ assert_array_equal(Ar, (self.A * r))
+
+ for ob in [object(), 'qrs']:
+ with assert_raises_regex(ValueError,
+ 'Can only multiply by integers'):
+ A*ob
+
+ def test_rmul(self):
+ A = self.A
+ for r in (2, 3, 5, 7, 197):
+ Ar = np.array([[A[0, 0]*r, A[0, 1]*r],
+ [A[1, 0]*r, A[1, 1]*r]]).view(np.chararray)
+ assert_array_equal(Ar, (r * self.A))
+
+ for ob in [object(), 'qrs']:
+ with assert_raises_regex(ValueError,
+ 'Can only multiply by integers'):
+ ob * A
+
+ def test_mod(self):
+ """Ticket #856"""
+ F = np.array([['%d', '%f'], ['%s', '%r']]).view(np.chararray)
+ C = np.array([[3, 7], [19, 1]])
+ FC = np.array([['3', '7.000000'],
+ ['19', '1']]).view(np.chararray)
+ assert_array_equal(FC, F % C)
+
+ A = np.array([['%.3f', '%d'], ['%s', '%r']]).view(np.chararray)
+ A1 = np.array([['1.000', '1'], ['1', '1']]).view(np.chararray)
+ assert_array_equal(A1, (A % 1))
+
+ A2 = np.array([['1.000', '2'], ['3', '4']]).view(np.chararray)
+ assert_array_equal(A2, (A % [[1, 2], [3, 4]]))
+
+ def test_rmod(self):
+ assert_(("%s" % self.A) == str(self.A))
+ assert_(("%r" % self.A) == repr(self.A))
+
+ for ob in [42, object()]:
+ with assert_raises_regex(
+ TypeError, "unsupported operand type.* and 'chararray'"):
+ ob % self.A
+
+ def test_slice(self):
+ """Regression test for https://github.com/numpy/numpy/issues/5982"""
+
+ arr = np.array([['abc ', 'def '], ['geh ', 'ijk ']],
+ dtype='S4').view(np.chararray)
+ sl1 = arr[:]
+ assert_array_equal(sl1, arr)
+ assert_(sl1.base is arr)
+ assert_(sl1.base.base is arr.base)
+
+ sl2 = arr[:, :]
+ assert_array_equal(sl2, arr)
+ assert_(sl2.base is arr)
+ assert_(sl2.base.base is arr.base)
+
+ assert_(arr[0, 0] == b'abc')
+
+
+def test_empty_indexing():
+ """Regression test for ticket 1948."""
+ # Check that indexing a chararray with an empty list/array returns an
+ # empty chararray instead of a chararray with a single empty string in it.
+ s = np.chararray((4,))
+ assert_(s[[]].size == 0)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_defchararray.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_defchararray.pyc
new file mode 100644
index 0000000..b0941bf
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_defchararray.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_deprecations.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_deprecations.py
new file mode 100644
index 0000000..edb5d5e
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_deprecations.py
@@ -0,0 +1,535 @@
+"""
+Tests related to deprecation warnings. Also a convenient place
+to document how deprecations should eventually be turned into errors.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import datetime
+import sys
+import operator
+import warnings
+import pytest
+
+import numpy as np
+from numpy.testing import (
+ assert_raises, assert_warns, assert_
+ )
+
+try:
+ import pytz
+ _has_pytz = True
+except ImportError:
+ _has_pytz = False
+
+
+class _DeprecationTestCase(object):
+    # Just as a warning: the warnings module uses re.match, so the start of
+    # this message must match.
+ message = ''
+ warning_cls = DeprecationWarning
+
+ def setup(self):
+ self.warn_ctx = warnings.catch_warnings(record=True)
+ self.log = self.warn_ctx.__enter__()
+
+ # Do *not* ignore other DeprecationWarnings. Ignoring warnings
+ # can give very confusing results because of
+ # https://bugs.python.org/issue4180 and it is probably simplest to
+ # try to keep the tests cleanly giving only the right warning type.
+ # (While checking them set to "error" those are ignored anyway)
+ # We still have them show up, because otherwise they would be raised
+ warnings.filterwarnings("always", category=self.warning_cls)
+ warnings.filterwarnings("always", message=self.message,
+ category=self.warning_cls)
+
+ def teardown(self):
+ self.warn_ctx.__exit__()
+
+ def assert_deprecated(self, function, num=1, ignore_others=False,
+ function_fails=False,
+ exceptions=np._NoValue,
+ args=(), kwargs={}):
+ """Test if DeprecationWarnings are given and raised.
+
+ This first checks if the function when called gives `num`
+ DeprecationWarnings, after that it tries to raise these
+ DeprecationWarnings and compares them with `exceptions`.
+ The exceptions can be different for cases where this code path
+ is simply not anticipated and the exception is replaced.
+
+ Parameters
+ ----------
+ function : callable
+ The function to test
+ num : int
+ Number of DeprecationWarnings to expect. This should normally be 1.
+ ignore_others : bool
+ Whether warnings of the wrong type should be ignored (note that
+ the message is not checked)
+ function_fails : bool
+ If the function would normally fail, setting this will check for
+ warnings inside a try/except block.
+ exceptions : Exception or tuple of Exceptions
+ Exception to expect when turning the warnings into an error.
+ The default checks for DeprecationWarnings. If exceptions is
+ empty the function is expected to run successfully.
+ args : tuple
+ Arguments for `function`
+ kwargs : dict
+ Keyword arguments for `function`
+ """
+ # reset the log
+ self.log[:] = []
+
+ if exceptions is np._NoValue:
+ exceptions = (self.warning_cls,)
+
+ try:
+ function(*args, **kwargs)
+ except (Exception if function_fails else tuple()):
+ pass
+
+ # just in case, clear the registry
+ num_found = 0
+ for warning in self.log:
+ if warning.category is self.warning_cls:
+ num_found += 1
+ elif not ignore_others:
+ raise AssertionError(
+ "expected %s but got: %s" %
+ (self.warning_cls.__name__, warning.category))
+ if num is not None and num_found != num:
+ msg = "%i warnings found but %i expected." % (len(self.log), num)
+ lst = [str(w.category) for w in self.log]
+ raise AssertionError("\n".join([msg] + lst))
+
+ with warnings.catch_warnings():
+ warnings.filterwarnings("error", message=self.message,
+ category=self.warning_cls)
+ try:
+ function(*args, **kwargs)
+ if exceptions != tuple():
+ raise AssertionError(
+ "No error raised during function call")
+ except exceptions:
+ if exceptions == tuple():
+ raise AssertionError(
+ "Error raised during function call")
+
+ def assert_not_deprecated(self, function, args=(), kwargs={}):
+ """Test that warnings are not raised.
+
+ This is just a shorthand for:
+
+ self.assert_deprecated(function, num=0, ignore_others=True,
+ exceptions=tuple(), args=args, kwargs=kwargs)
+ """
+ self.assert_deprecated(function, num=0, ignore_others=True,
+ exceptions=tuple(), args=args, kwargs=kwargs)
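+
+    # Usage sketch (hypothetical names, not part of the upstream file): a
+    # concrete test class subclasses this harness, optionally pins `message`
+    # to a regex matching the start of the warning text, and then drives the
+    # helpers:
+    #
+    #     class TestFooDeprecation(_DeprecationTestCase):
+    #         message = "foo is deprecated"   # hypothetical warning text
+    #
+    #         def test_foo(self):
+    #             self.assert_deprecated(foo)  # foo() issues DeprecationWarning
+    #
+    # TestTestDeprecated further below self-tests exactly this machinery.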
+
+
+class _VisibleDeprecationTestCase(_DeprecationTestCase):
+ warning_cls = np.VisibleDeprecationWarning
+
+
+class TestNonTupleNDIndexDeprecation(object):
+ def test_basic(self):
+ a = np.zeros((5, 5))
+ with warnings.catch_warnings():
+ warnings.filterwarnings('always')
+ assert_warns(FutureWarning, a.__getitem__, [[0, 1], [0, 1]])
+ assert_warns(FutureWarning, a.__getitem__, [slice(None)])
+
+ warnings.filterwarnings('error')
+ assert_raises(FutureWarning, a.__getitem__, [[0, 1], [0, 1]])
+ assert_raises(FutureWarning, a.__getitem__, [slice(None)])
+
+        # an a[[0, 1]] index always was advanced indexing, so no error/warning
+ a[[0, 1]]
+
+
+class TestRankDeprecation(_DeprecationTestCase):
+ """Test that np.rank is deprecated. The function should simply be
+ removed. The VisibleDeprecationWarning may become unnecessary.
+ """
+
+ def test(self):
+ a = np.arange(10)
+ assert_warns(np.VisibleDeprecationWarning, np.rank, a)
+
+
+class TestComparisonDeprecations(_DeprecationTestCase):
+ """This tests the deprecation, for non-element-wise comparison logic.
+ This used to mean that when an error occurred during element-wise comparison
+ (i.e. broadcasting) NotImplemented was returned, but also in the comparison
+ itself, False was given instead of the error.
+
+ Also test FutureWarning for the None comparison.
+ """
+
+ message = "elementwise.* comparison failed; .*"
+
+ def test_normal_types(self):
+ for op in (operator.eq, operator.ne):
+ # Broadcasting errors:
+ self.assert_deprecated(op, args=(np.zeros(3), []))
+ a = np.zeros(3, dtype='i,i')
+ # (warning is issued a couple of times here)
+ self.assert_deprecated(op, args=(a, a[:-1]), num=None)
+
+ # Element comparison error (numpy array can't be compared).
+ a = np.array([1, np.array([1,2,3])], dtype=object)
+ b = np.array([1, np.array([1,2,3])], dtype=object)
+ self.assert_deprecated(op, args=(a, b), num=None)
+
+ def test_string(self):
+ # For two string arrays, strings always raised the broadcasting error:
+ a = np.array(['a', 'b'])
+ b = np.array(['a', 'b', 'c'])
+ assert_raises(ValueError, lambda x, y: x == y, a, b)
+
+ # The empty list is not cast to string, and this used to pass due
+ # to dtype mismatch; now (2018-06-21) it correctly leads to a
+ # FutureWarning.
+ assert_warns(FutureWarning, lambda: a == [])
+
+ def test_void_dtype_equality_failures(self):
+ class NotArray(object):
+ def __array__(self):
+ raise TypeError
+
+ # Needed so Python 3 does not raise DeprecationWarning twice.
+ def __ne__(self, other):
+ return NotImplemented
+
+ self.assert_deprecated(lambda: np.arange(2) == NotArray())
+ self.assert_deprecated(lambda: np.arange(2) != NotArray())
+
+ struct1 = np.zeros(2, dtype="i4,i4")
+ struct2 = np.zeros(2, dtype="i4,i4,i4")
+
+ assert_warns(FutureWarning, lambda: struct1 == 1)
+ assert_warns(FutureWarning, lambda: struct1 == struct2)
+ assert_warns(FutureWarning, lambda: struct1 != 1)
+ assert_warns(FutureWarning, lambda: struct1 != struct2)
+
+ def test_array_richcompare_legacy_weirdness(self):
+ # It doesn't really work to use assert_deprecated here, b/c part of
+ # the point of assert_deprecated is to check that when warnings are
+ # set to "error" mode then the error is propagated -- which is good!
+ # But here we are testing a bunch of code that is deprecated *because*
+ # it has the habit of swallowing up errors and converting them into
+ # different warnings. So assert_warns will have to be sufficient.
+ assert_warns(FutureWarning, lambda: np.arange(2) == "a")
+ assert_warns(FutureWarning, lambda: np.arange(2) != "a")
+ # No warning for scalar comparisons
+ with warnings.catch_warnings():
+ warnings.filterwarnings("error")
+ assert_(not (np.array(0) == "a"))
+ assert_(np.array(0) != "a")
+ assert_(not (np.int16(0) == "a"))
+ assert_(np.int16(0) != "a")
+
+ for arg1 in [np.asarray(0), np.int16(0)]:
+ struct = np.zeros(2, dtype="i4,i4")
+ for arg2 in [struct, "a"]:
+ for f in [operator.lt, operator.le, operator.gt, operator.ge]:
+ if sys.version_info[0] >= 3:
+ # py3
+ with warnings.catch_warnings() as l:
+ warnings.filterwarnings("always")
+ assert_raises(TypeError, f, arg1, arg2)
+ assert_(not l)
+ else:
+ # py2
+ assert_warns(DeprecationWarning, f, arg1, arg2)
+
+
+class TestDatetime64Timezone(_DeprecationTestCase):
+ """Parsing of datetime64 with timezones deprecated in 1.11.0, because
+ datetime64 is now timezone naive rather than UTC only.
+
+ It will be quite a while before we can remove this, because, at the very
+ least, a lot of existing code uses the 'Z' modifier to avoid conversion
+ from local time to UTC, even if otherwise it handles time in a timezone
+ naive fashion.
+ """
+ def test_string(self):
+ self.assert_deprecated(np.datetime64, args=('2000-01-01T00+01',))
+ self.assert_deprecated(np.datetime64, args=('2000-01-01T00Z',))
+
+ @pytest.mark.skipif(not _has_pytz,
+ reason="The pytz module is not available.")
+ def test_datetime(self):
+ tz = pytz.timezone('US/Eastern')
+ dt = datetime.datetime(2000, 1, 1, 0, 0, tzinfo=tz)
+ self.assert_deprecated(np.datetime64, args=(dt,))
+
+
+class TestNonCContiguousViewDeprecation(_DeprecationTestCase):
+ """View of non-C-contiguous arrays deprecated in 1.11.0.
+
+ The deprecation will not be raised for arrays that are both C and F
+ contiguous, as C contiguous is dominant. There are more such arrays
+ with relaxed stride checking than without so the deprecation is not
+ as visible with relaxed stride checking in force.
+ """
+
+ def test_fortran_contiguous(self):
+ self.assert_deprecated(np.ones((2,2)).T.view, args=(complex,))
+ self.assert_deprecated(np.ones((2,2)).T.view, args=(np.int8,))
+
+
+class TestInvalidOrderParameterInputForFlattenArrayDeprecation(_DeprecationTestCase):
+ """Invalid arguments to the ORDER parameter in array.flatten() should not be
+ allowed and should raise an error. However, in the interests of not breaking
+ code that may inadvertently pass invalid arguments to this parameter, a
+ DeprecationWarning will be issued instead for the time being to give developers
+ time to refactor relevant code.
+ """
+
+ def test_flatten_array_non_string_arg(self):
+ x = np.zeros((3, 5))
+ self.message = ("Non-string object detected for "
+ "the array ordering. Please pass "
+ "in 'C', 'F', 'A', or 'K' instead")
+ self.assert_deprecated(x.flatten, args=(np.pi,))
+
+ def test_flatten_array_invalid_string_arg(self):
+ # Tests that a DeprecationWarning is raised
+ # when a string of length greater than one
+ # starting with "C", "F", "A", or "K" (case-
+ # and unicode-insensitive) is passed in for
+ # the ORDER parameter. Otherwise, a TypeError
+ # will be raised!
+
+ x = np.zeros((3, 5))
+ self.message = ("Non length-one string passed "
+ "in for the array ordering. Please "
+ "pass in 'C', 'F', 'A', or 'K' instead")
+ self.assert_deprecated(x.flatten, args=("FACK",))
+
+
+class TestArrayDataAttributeAssignmentDeprecation(_DeprecationTestCase):
+ """Assigning the 'data' attribute of an ndarray is unsafe as pointed
+ out in gh-7093. Eventually, such assignment should NOT be allowed, but
+ in the interests of maintaining backwards compatibility, only a Deprecation-
+ Warning will be raised instead for the time being to give developers time to
+ refactor relevant code.
+ """
+
+ def test_data_attr_assignment(self):
+ a = np.arange(10)
+ b = np.linspace(0, 1, 10)
+
+ self.message = ("Assigning the 'data' attribute is an "
+ "inherently unsafe operation and will "
+ "be removed in the future.")
+ self.assert_deprecated(a.__setattr__, args=('data', b.data))
+
+
+class TestLinspaceInvalidNumParameter(_DeprecationTestCase):
+ """Argument to the num parameter in linspace that cannot be
+ safely interpreted as an integer is deprecated in 1.12.0.
+
+ Argument to the num parameter in linspace that cannot be
+ safely interpreted as an integer should not be allowed.
+ In the interest of not breaking code that passes
+ an argument that could still be interpreted as an integer, a
+ DeprecationWarning will be issued for the time being to give
+ developers time to refactor relevant code.
+ """
+ def test_float_arg(self):
+ # 2016-02-25, PR#7328
+ self.assert_deprecated(np.linspace, args=(0, 10, 2.5))
+
+
+class TestBinaryReprInsufficientWidthParameterForRepresentation(_DeprecationTestCase):
+ """
+ If a 'width' parameter is passed into ``binary_repr`` that is insufficient to
+ represent the number in base 2 (positive) or 2's complement (negative) form,
+ the function used to silently ignore the parameter and return a representation
+ using the minimal number of bits needed for the form in question. Such behavior
+ is now considered unsafe from a user perspective and will raise an error in the future.
+ """
+
+ def test_insufficient_width_positive(self):
+ args = (10,)
+ kwargs = {'width': 2}
+
+ self.message = ("Insufficient bit width provided. This behavior "
+ "will raise an error in the future.")
+ self.assert_deprecated(np.binary_repr, args=args, kwargs=kwargs)
+
+ def test_insufficient_width_negative(self):
+ args = (-5,)
+ kwargs = {'width': 2}
+
+ self.message = ("Insufficient bit width provided. This behavior "
+ "will raise an error in the future.")
+ self.assert_deprecated(np.binary_repr, args=args, kwargs=kwargs)
+
+
+class TestNumericStyleTypecodes(_DeprecationTestCase):
+ """
+ Deprecate the old numeric-style dtypes, which are especially
+ confusing for complex types, e.g. Complex32 -> complex64. When the
+ deprecation cycle is complete, the check for the strings should be
+ removed from PyArray_DescrConverter in descriptor.c, and the
+ deprecated keys should not be added as capitalized aliases in
+ _add_aliases in numerictypes.py.
+ """
+ def test_all_dtypes(self):
+ deprecated_types = [
+ 'Bool', 'Complex32', 'Complex64', 'Float16', 'Float32', 'Float64',
+ 'Int8', 'Int16', 'Int32', 'Int64', 'Object0', 'Timedelta64',
+ 'UInt8', 'UInt16', 'UInt32', 'UInt64', 'Void0'
+ ]
+ if sys.version_info[0] < 3:
+ deprecated_types.extend(['Unicode0', 'String0'])
+
+ for dt in deprecated_types:
+ self.assert_deprecated(np.dtype, exceptions=(TypeError,),
+ args=(dt,))
+
+
+class TestTestDeprecated(object):
+ def test_assert_deprecated(self):
+ test_case_instance = _DeprecationTestCase()
+ test_case_instance.setup()
+ assert_raises(AssertionError,
+ test_case_instance.assert_deprecated,
+ lambda: None)
+
+ def foo():
+ warnings.warn("foo", category=DeprecationWarning, stacklevel=2)
+
+ test_case_instance.assert_deprecated(foo)
+ test_case_instance.teardown()
+
+
+class TestClassicIntDivision(_DeprecationTestCase):
+ """
+ See #7949. Deprecate the numeric-style dtypes with -3 flag in python 2
+ if used for division
+ List of data types: https://docs.scipy.org/doc/numpy/user/basics.types.html
+ """
+ def test_int_dtypes(self):
+        # scramble types and do some mix-and-match testing
+ deprecated_types = [
+ 'bool_', 'int_', 'intc', 'uint8', 'int8', 'uint64', 'int32', 'uint16',
+ 'intp', 'int64', 'uint32', 'int16'
+ ]
+ if sys.version_info[0] < 3 and sys.py3kwarning:
+ import operator as op
+ dt2 = 'bool_'
+ for dt1 in deprecated_types:
+ a = np.array([1,2,3], dtype=dt1)
+ b = np.array([1,2,3], dtype=dt2)
+ self.assert_deprecated(op.div, args=(a,b))
+ dt2 = dt1
+
+
+class TestNonNumericConjugate(_DeprecationTestCase):
+ """
+ Deprecate no-op behavior of ndarray.conjugate on non-numeric dtypes,
+ which conflicts with the error behavior of np.conjugate.
+ """
+ def test_conjugate(self):
+ for a in np.array(5), np.array(5j):
+ self.assert_not_deprecated(a.conjugate)
+ for a in (np.array('s'), np.array('2016', 'M'),
+ np.array((1, 2), [('a', int), ('b', int)])):
+ self.assert_deprecated(a.conjugate)
+
+
+class TestNPY_CHAR(_DeprecationTestCase):
+ # 2017-05-03, 1.13.0
+ def test_npy_char_deprecation(self):
+ from numpy.core._multiarray_tests import npy_char_deprecation
+ self.assert_deprecated(npy_char_deprecation)
+ assert_(npy_char_deprecation() == 'S1')
+
+
+class Test_UPDATEIFCOPY(_DeprecationTestCase):
+ """
+ v1.14 deprecates creating an array with the UPDATEIFCOPY flag, use
+ WRITEBACKIFCOPY instead
+ """
+ def test_npy_updateifcopy_deprecation(self):
+ from numpy.core._multiarray_tests import npy_updateifcopy_deprecation
+ arr = np.arange(9).reshape(3, 3)
+ v = arr.T
+ self.assert_deprecated(npy_updateifcopy_deprecation, args=(v,))
+
+
+class TestDatetimeEvent(_DeprecationTestCase):
+ # 2017-08-11, 1.14.0
+ def test_3_tuple(self):
+ for cls in (np.datetime64, np.timedelta64):
+ # two valid uses - (unit, num) and (unit, num, den, None)
+ self.assert_not_deprecated(cls, args=(1, ('ms', 2)))
+ self.assert_not_deprecated(cls, args=(1, ('ms', 2, 1, None)))
+
+ # trying to use the event argument, removed in 1.7.0, is deprecated
+ # it used to be a uint8
+ self.assert_deprecated(cls, args=(1, ('ms', 2, 'event')))
+ self.assert_deprecated(cls, args=(1, ('ms', 2, 63)))
+ self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 'event')))
+ self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 63)))
+
+
+class TestTruthTestingEmptyArrays(_DeprecationTestCase):
+ # 2017-09-25, 1.14.0
+ message = '.*truth value of an empty array is ambiguous.*'
+
+ def test_1d(self):
+ self.assert_deprecated(bool, args=(np.array([]),))
+
+ def test_2d(self):
+ self.assert_deprecated(bool, args=(np.zeros((1, 0)),))
+ self.assert_deprecated(bool, args=(np.zeros((0, 1)),))
+ self.assert_deprecated(bool, args=(np.zeros((0, 0)),))
+
+
+class TestBincount(_DeprecationTestCase):
+ # 2017-06-01, 1.14.0
+ def test_bincount_minlength(self):
+ self.assert_deprecated(lambda: np.bincount([1, 2, 3], minlength=None))
+
+
+class TestGeneratorSum(_DeprecationTestCase):
+ # 2018-02-25, 1.15.0
+ def test_generator_sum(self):
+ self.assert_deprecated(np.sum, args=((i for i in range(5)),))
+
+
+class TestSctypeNA(_VisibleDeprecationTestCase):
+ # 2018-06-24, 1.16
+ def test_sctypeNA(self):
+ self.assert_deprecated(lambda: np.sctypeNA['?'])
+ self.assert_deprecated(lambda: np.typeNA['?'])
+ self.assert_deprecated(lambda: np.typeNA.get('?'))
+
+
+class TestPositiveOnNonNumerical(_DeprecationTestCase):
+ # 2018-06-28, 1.16.0
+ def test_positive_on_non_number(self):
+ self.assert_deprecated(operator.pos, args=(np.array('foo'),))
+
+class TestFromstring(_DeprecationTestCase):
+ # 2017-10-19, 1.14
+ def test_fromstring(self):
+ self.assert_deprecated(np.fromstring, args=('\x00'*80,))
+
+class Test_GetSet_NumericOps(_DeprecationTestCase):
+ # 2018-09-20, 1.16.0
+ def test_get_numeric_ops(self):
+ from numpy.core._multiarray_tests import getset_numericops
+ self.assert_deprecated(getset_numericops, num=2)
+
+ # empty kwargs prevents any state actually changing which would break
+ # other tests.
+ self.assert_deprecated(np.set_numeric_ops, kwargs={})
+ assert_raises(ValueError, np.set_numeric_ops, add='abc')
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_deprecations.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_deprecations.pyc
new file mode 100644
index 0000000..5e429f2
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_deprecations.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_dtype.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_dtype.py
new file mode 100644
index 0000000..8f37119
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_dtype.py
@@ -0,0 +1,1122 @@
+from __future__ import division, absolute_import, print_function
+
+import sys
+import operator
+import pytest
+import ctypes
+import gc
+
+import numpy as np
+from numpy.core._rational_tests import rational
+from numpy.testing import (
+ assert_, assert_equal, assert_array_equal, assert_raises, HAS_REFCOUNT)
+from numpy.core.numeric import pickle
+
+def assert_dtype_equal(a, b):
+ assert_equal(a, b)
+ assert_equal(hash(a), hash(b),
+ "two equivalent types do not hash to the same value !")
+
+def assert_dtype_not_equal(a, b):
+ assert_(a != b)
+ assert_(hash(a) != hash(b),
+ "two different types hash to the same value !")
+
+class TestBuiltin(object):
+ @pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object,
+ np.unicode])
+ def test_run(self, t):
+ """Only test hash runs at all."""
+ dt = np.dtype(t)
+ hash(dt)
+
+ @pytest.mark.parametrize('t', [int, float])
+ def test_dtype(self, t):
+ # Make sure equivalent byte order char hash the same (e.g. < and = on
+ # little endian)
+ dt = np.dtype(t)
+ dt2 = dt.newbyteorder("<")
+ dt3 = dt.newbyteorder(">")
+ if dt == dt2:
+ assert_(dt.byteorder != dt2.byteorder, "bogus test")
+ assert_dtype_equal(dt, dt2)
+ else:
+ assert_(dt.byteorder != dt3.byteorder, "bogus test")
+ assert_dtype_equal(dt, dt3)
+
+ def test_equivalent_dtype_hashing(self):
+ # Make sure equivalent dtypes with different type num hash equal
+ uintp = np.dtype(np.uintp)
+ if uintp.itemsize == 4:
+ left = uintp
+ right = np.dtype(np.uint32)
+ else:
+ left = uintp
+ right = np.dtype(np.ulonglong)
+ assert_(left == right)
+ assert_(hash(left) == hash(right))
+
+ def test_invalid_types(self):
+ # Make sure invalid type strings raise an error
+
+ assert_raises(TypeError, np.dtype, 'O3')
+ assert_raises(TypeError, np.dtype, 'O5')
+ assert_raises(TypeError, np.dtype, 'O7')
+ assert_raises(TypeError, np.dtype, 'b3')
+ assert_raises(TypeError, np.dtype, 'h4')
+ assert_raises(TypeError, np.dtype, 'I5')
+ assert_raises(TypeError, np.dtype, 'e3')
+ assert_raises(TypeError, np.dtype, 'f5')
+
+ if np.dtype('g').itemsize == 8 or np.dtype('g').itemsize == 16:
+ assert_raises(TypeError, np.dtype, 'g12')
+ elif np.dtype('g').itemsize == 12:
+ assert_raises(TypeError, np.dtype, 'g16')
+
+ if np.dtype('l').itemsize == 8:
+ assert_raises(TypeError, np.dtype, 'l4')
+ assert_raises(TypeError, np.dtype, 'L4')
+ else:
+ assert_raises(TypeError, np.dtype, 'l8')
+ assert_raises(TypeError, np.dtype, 'L8')
+
+ if np.dtype('q').itemsize == 8:
+ assert_raises(TypeError, np.dtype, 'q4')
+ assert_raises(TypeError, np.dtype, 'Q4')
+ else:
+ assert_raises(TypeError, np.dtype, 'q8')
+ assert_raises(TypeError, np.dtype, 'Q8')
+
+ def test_bad_param(self):
+ # Can't give a size that's too small
+ assert_raises(ValueError, np.dtype,
+ {'names':['f0', 'f1'],
+ 'formats':['i4', 'i1'],
+ 'offsets':[0, 4],
+ 'itemsize':4})
+ # If alignment is enabled, the alignment (4) must divide the itemsize
+ assert_raises(ValueError, np.dtype,
+ {'names':['f0', 'f1'],
+ 'formats':['i4', 'i1'],
+ 'offsets':[0, 4],
+ 'itemsize':9}, align=True)
+ # If alignment is enabled, the individual fields must be aligned
+ assert_raises(ValueError, np.dtype,
+ {'names':['f0', 'f1'],
+ 'formats':['i1', 'f4'],
+ 'offsets':[0, 2]}, align=True)
+
+ def test_field_order_equality(self):
+ x = np.dtype({'names': ['A', 'B'],
+ 'formats': ['i4', 'f4'],
+ 'offsets': [0, 4]})
+ y = np.dtype({'names': ['B', 'A'],
+ 'formats': ['f4', 'i4'],
+ 'offsets': [4, 0]})
+ assert_equal(x == y, False)
+
+class TestRecord(object):
+ def test_equivalent_record(self):
+ """Test whether equivalent record dtypes hash the same."""
+ a = np.dtype([('yo', int)])
+ b = np.dtype([('yo', int)])
+ assert_dtype_equal(a, b)
+
+ def test_different_names(self):
+ # In theory, they may hash the same (collision)?
+ a = np.dtype([('yo', int)])
+ b = np.dtype([('ye', int)])
+ assert_dtype_not_equal(a, b)
+
+ def test_different_titles(self):
+ # In theory, they may hash the same (collision)?
+ a = np.dtype({'names': ['r', 'b'],
+ 'formats': ['u1', 'u1'],
+ 'titles': ['Red pixel', 'Blue pixel']})
+ b = np.dtype({'names': ['r', 'b'],
+ 'formats': ['u1', 'u1'],
+ 'titles': ['RRed pixel', 'Blue pixel']})
+ assert_dtype_not_equal(a, b)
+
+ def test_mutate(self):
+ # Mutating a dtype should reset the cached hash value
+ a = np.dtype([('yo', int)])
+ b = np.dtype([('yo', int)])
+ c = np.dtype([('ye', int)])
+ assert_dtype_equal(a, b)
+ assert_dtype_not_equal(a, c)
+ a.names = ['ye']
+ assert_dtype_equal(a, c)
+ assert_dtype_not_equal(a, b)
+ state = b.__reduce__()[2]
+ a.__setstate__(state)
+ assert_dtype_equal(a, b)
+ assert_dtype_not_equal(a, c)
+
+ def test_not_lists(self):
+ """Test if an appropriate exception is raised when passing bad values to
+ the dtype constructor.
+ """
+ assert_raises(TypeError, np.dtype,
+ dict(names={'A', 'B'}, formats=['f8', 'i4']))
+ assert_raises(TypeError, np.dtype,
+ dict(names=['A', 'B'], formats={'f8', 'i4'}))
+
+ def test_aligned_size(self):
+ # Check that structured dtypes get padded to an aligned size
+ dt = np.dtype('i4, i1', align=True)
+ assert_equal(dt.itemsize, 8)
+ dt = np.dtype([('f0', 'i4'), ('f1', 'i1')], align=True)
+ assert_equal(dt.itemsize, 8)
+ dt = np.dtype({'names':['f0', 'f1'],
+ 'formats':['i4', 'u1'],
+ 'offsets':[0, 4]}, align=True)
+ assert_equal(dt.itemsize, 8)
+ dt = np.dtype({'f0': ('i4', 0), 'f1':('u1', 4)}, align=True)
+ assert_equal(dt.itemsize, 8)
+ # Nesting should preserve that alignment
+ dt1 = np.dtype([('f0', 'i4'),
+ ('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]),
+ ('f2', 'i1')], align=True)
+ assert_equal(dt1.itemsize, 20)
+ dt2 = np.dtype({'names':['f0', 'f1', 'f2'],
+ 'formats':['i4',
+ [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')],
+ 'i1'],
+ 'offsets':[0, 4, 16]}, align=True)
+ assert_equal(dt2.itemsize, 20)
+ dt3 = np.dtype({'f0': ('i4', 0),
+ 'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4),
+ 'f2': ('i1', 16)}, align=True)
+ assert_equal(dt3.itemsize, 20)
+ assert_equal(dt1, dt2)
+ assert_equal(dt2, dt3)
+ # Nesting should preserve packing
+ dt1 = np.dtype([('f0', 'i4'),
+ ('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]),
+ ('f2', 'i1')], align=False)
+ assert_equal(dt1.itemsize, 11)
+ dt2 = np.dtype({'names':['f0', 'f1', 'f2'],
+ 'formats':['i4',
+ [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')],
+ 'i1'],
+ 'offsets':[0, 4, 10]}, align=False)
+ assert_equal(dt2.itemsize, 11)
+ dt3 = np.dtype({'f0': ('i4', 0),
+ 'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4),
+ 'f2': ('i1', 10)}, align=False)
+ assert_equal(dt3.itemsize, 11)
+ assert_equal(dt1, dt2)
+ assert_equal(dt2, dt3)
+ # Array of subtype should preserve alignment
+ dt1 = np.dtype([('a', '|i1'),
+ ('b', [('f0', '<i2'),
+ ('f1', '<f4')], 2)], align=True)
+ assert_equal(dt1.descr, [('a', '|i1'), ('', '|V3'),
+ ('b', [('f0', '<i2'), ('', '|V2'),
+ ('f1', '<f4')], (2,))])
+
+
+class TestString(object):
+ def test_complex_dtype_str(self):
+ dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
+ ('rtile', '>f4', (64, 36))], (3,)),
+ ('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
+ ('bright', '>f4', (8, 36))])])
+ assert_equal(str(dt),
+ "[('top', [('tiles', ('>f4', (64, 64)), (1,)), "
+ "('rtile', '>f4', (64, 36))], (3,)), "
+ "('bottom', [('bleft', ('>f4', (8, 64)), (1,)), "
+ "('bright', '>f4', (8, 36))])]")
+
+ # If the sticky aligned flag is set to True, it makes the
+ # str() function use a dict representation with an 'aligned' flag
+ dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
+ ('rtile', '>f4', (64, 36))],
+ (3,)),
+ ('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
+ ('bright', '>f4', (8, 36))])],
+ align=True)
+ assert_equal(str(dt),
+ "{'names':['top','bottom'], "
+ "'formats':[([('tiles', ('>f4', (64, 64)), (1,)), "
+ "('rtile', '>f4', (64, 36))], (3,)),"
+ "[('bleft', ('>f4', (8, 64)), (1,)), "
+ "('bright', '>f4', (8, 36))]], "
+ "'offsets':[0,76800], "
+ "'itemsize':80000, "
+ "'aligned':True}")
+ assert_equal(np.dtype(eval(str(dt))), dt)
+
+ dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'],
+ 'offsets': [0, 1, 2],
+ 'titles': ['Red pixel', 'Green pixel', 'Blue pixel']})
+ assert_equal(str(dt),
+ "[(('Red pixel', 'r'), 'u1'), "
+ "(('Green pixel', 'g'), 'u1'), "
+ "(('Blue pixel', 'b'), 'u1')]")
+
+ dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'],
+ 'formats': ['<u4', 'u1', 'u1', 'u1'],
+ 'offsets': [0, 0, 1, 2],
+ 'titles': ['Color', 'Red pixel',
+ 'Green pixel', 'Blue pixel']})
+ assert_equal(str(dt),
+ "{'names':['rgba','r','g','b'],"
+ " 'formats':['<u4','u1','u1','u1'],"
+ " 'offsets':[0,0,1,2],"
+ " 'titles':['Color','Red pixel',"
+ "'Green pixel','Blue pixel'], 'itemsize':4}")
+
+ def test_repr_structured(self):
+ dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
+ ('rtile', '>f4', (64, 36))], (3,)),
+ ('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
+ ('bright', '>f4', (8, 36))])])
+ assert_equal(repr(dt),
+ "dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), "
+ "('rtile', '>f4', (64, 36))], (3,)), "
+ "('bottom', [('bleft', ('>f4', (8, 64)), (1,)), "
+ "('bright', '>f4', (8, 36))])])")
+
+ dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'],
+ 'offsets': [0, 1, 2],
+ 'titles': ['Red pixel', 'Green pixel', 'Blue pixel']},
+ align=True)
+ assert_equal(repr(dt),
+ "dtype([(('Red pixel', 'r'), 'u1'), "
+ "(('Green pixel', 'g'), 'u1'), "
+ "(('Blue pixel', 'b'), 'u1')], align=True)")
+
+ def test_repr_structured_not_packed(self):
+ dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'],
+ 'formats': ['<u4', 'u1', 'u1', 'u1'],
+ 'offsets': [0, 0, 1, 2],
+ 'titles': ['Color', 'Red pixel',
+ 'Green pixel', 'Blue pixel']}, align=True)
+ assert_equal(repr(dt),
+ "dtype({'names':['rgba','r','g','b'], "
+ "'formats':['<u4','u1','u1','u1'], "
+ "'offsets':[0,0,1,2], "
+ "'titles':['Color','Red pixel',"
+ "'Green pixel','Blue pixel'], 'itemsize':4}, align=True)")
+
+ @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Python 2 only")
+ def test_dtype_str_with_long_in_shape(self):
+ # Pull request #376, should not error
+ np.dtype('(1L,)i4')
+
+ def test_base_dtype_with_object_type(self):
+ # Issue gh-2798, should not error.
+ np.array(['a'], dtype="O").astype(("O", [("name", "O")]))
+
+ def test_empty_string_to_object(self):
+ # Pull request #4722
+ np.array(["", ""]).astype(object)
+
+ def test_void_subclass_unsized(self):
+ dt = np.dtype(np.record)
+ assert_equal(repr(dt), "dtype('V')")
+ assert_equal(str(dt), '|V0')
+ assert_equal(dt.name, 'record')
+
+ def test_void_subclass_sized(self):
+ dt = np.dtype((np.record, 2))
+ assert_equal(repr(dt), "dtype('V2')")
+ assert_equal(str(dt), '|V2')
+ assert_equal(dt.name, 'record16')
+
+ def test_void_subclass_fields(self):
+ dt = np.dtype((np.record, [('a', '<u2')]))
+ assert_equal(repr(dt), "dtype((numpy.record, [('a', '<u2')]))")
+ assert_equal(str(dt), "(numpy.record, [('a', '<u2')])")
+ assert_equal(dt.name, 'record16')
+
+
+class TestFromCTypes(object):
+
+ @staticmethod
+ def check(ctype, dtype):
+ dtype = np.dtype(dtype)
+ assert_equal(np.dtype(ctype), dtype)
+ assert_equal(np.dtype(ctype()), dtype)
+
+ def test_big_endian_structure_packed(self):
+ class BigEndStruct(ctypes.BigEndianStructure):
+ _fields_ = [
+ ('one', ctypes.c_uint8),
+ ('two', ctypes.c_uint32)
+ ]
+ _pack_ = 1
+ expected = np.dtype([('one', 'u1'), ('two', '>u4')])
+ self.check(BigEndStruct, expected)
+
+ def test_little_endian_structure_packed(self):
+ class LittleEndStruct(ctypes.LittleEndianStructure):
+ _fields_ = [
+ ('one', ctypes.c_uint8),
+ ('two', ctypes.c_uint32)
+ ]
+ _pack_ = 1
+ expected = np.dtype([('one', 'u1'), ('two', '<u4')])
+ self.check(LittleEndStruct, expected)
+
+ def test_big_endian_structure(self):
+ class PaddedStruct(ctypes.BigEndianStructure):
+ _fields_ = [
+ ('a', ctypes.c_uint8),
+ ('b', ctypes.c_uint16)
+ ]
+ expected = np.dtype([
+ ('a', '>B'),
+ ('b', '>H')
+ ], align=True)
+ self.check(PaddedStruct, expected)
+
+ def test_simple_endian_types(self):
+ self.check(ctypes.c_uint16.__ctype_le__, np.dtype('<u2'))
+ self.check(ctypes.c_uint16.__ctype_be__, np.dtype('>u2'))
+ self.check(ctypes.c_uint8.__ctype_le__, np.dtype('u1'))
+ self.check(ctypes.c_uint8.__ctype_be__, np.dtype('u1'))
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_dtype.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_dtype.pyc
new file mode 100644
index 0000000..083ecde
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_dtype.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_einsum.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_einsum.py
new file mode 100644
index 0000000..3be4a8a
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_einsum.py
@@ -0,0 +1,1001 @@
+from __future__ import division, absolute_import, print_function
+
+import itertools
+
+import numpy as np
+from numpy.testing import (
+ assert_, assert_equal, assert_array_equal, assert_almost_equal,
+ assert_raises, suppress_warnings
+ )
+
+# Setup for optimize einsum
+chars = 'abcdefghij'
+sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3])
+global_size_dict = dict(zip(chars, sizes))
+
+
+class TestEinsum(object):
+ def test_einsum_errors(self):
+ for do_opt in [True, False]:
+ # Need enough arguments
+ assert_raises(ValueError, np.einsum, optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "", optimize=do_opt)
+
+ # subscripts must be a string
+ assert_raises(TypeError, np.einsum, 0, 0, optimize=do_opt)
+
+ # out parameter must be an array
+ assert_raises(TypeError, np.einsum, "", 0, out='test',
+ optimize=do_opt)
+
+ # order parameter must be a valid order
+ assert_raises(TypeError, np.einsum, "", 0, order='W',
+ optimize=do_opt)
+
+ # casting parameter must be a valid casting
+ assert_raises(ValueError, np.einsum, "", 0, casting='blah',
+ optimize=do_opt)
+
+ # dtype parameter must be a valid dtype
+ assert_raises(TypeError, np.einsum, "", 0, dtype='bad_data_type',
+ optimize=do_opt)
+
+ # other keyword arguments are rejected
+ assert_raises(TypeError, np.einsum, "", 0, bad_arg=0,
+ optimize=do_opt)
+
+ # issue 4528 revealed a segfault with this call
+ assert_raises(TypeError, np.einsum, *(None,)*63, optimize=do_opt)
+
+ # number of operands must match count in subscripts string
+ assert_raises(ValueError, np.einsum, "", 0, 0, optimize=do_opt)
+ assert_raises(ValueError, np.einsum, ",", 0, [0], [0],
+ optimize=do_opt)
+ assert_raises(ValueError, np.einsum, ",", [0], optimize=do_opt)
+
+ # can't have more subscripts than dimensions in the operand
+ assert_raises(ValueError, np.einsum, "i", 0, optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "ij", [0, 0], optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "...i", 0, optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "i...j", [0, 0], optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "i...", 0, optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "ij...", [0, 0], optimize=do_opt)
+
+ # invalid ellipsis
+ assert_raises(ValueError, np.einsum, "i..", [0, 0], optimize=do_opt)
+ assert_raises(ValueError, np.einsum, ".i...", [0, 0], optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "j->..j", [0, 0], optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "j->.j...", [0, 0], optimize=do_opt)
+
+ # invalid subscript character
+ assert_raises(ValueError, np.einsum, "i%...", [0, 0], optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "...j$", [0, 0], optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "i->&", [0, 0], optimize=do_opt)
+
+ # output subscripts must appear in input
+ assert_raises(ValueError, np.einsum, "i->ij", [0, 0], optimize=do_opt)
+
+ # output subscripts may only be specified once
+ assert_raises(ValueError, np.einsum, "ij->jij", [[0, 0], [0, 0]],
+ optimize=do_opt)
+
+ # dimensions must match when being collapsed
+ assert_raises(ValueError, np.einsum, "ii",
+ np.arange(6).reshape(2, 3), optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "ii->i",
+ np.arange(6).reshape(2, 3), optimize=do_opt)
+
+ # broadcasting to new dimensions must be enabled explicitly
+ assert_raises(ValueError, np.einsum, "i", np.arange(6).reshape(2, 3),
+ optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "i->i", [[0, 1], [0, 1]],
+ out=np.arange(4).reshape(2, 2), optimize=do_opt)
+
+ def test_einsum_views(self):
+ # pass-through
+ for do_opt in [True, False]:
+ a = np.arange(6)
+ a.shape = (2, 3)
+
+ b = np.einsum("...", a, optimize=do_opt)
+ assert_(b.base is a)
+
+ b = np.einsum(a, [Ellipsis], optimize=do_opt)
+ assert_(b.base is a)
+
+ b = np.einsum("ij", a, optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, a)
+
+ b = np.einsum(a, [0, 1], optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, a)
+
+ # output is writeable whenever input is writeable
+ b = np.einsum("...", a, optimize=do_opt)
+ assert_(b.flags['WRITEABLE'])
+ a.flags['WRITEABLE'] = False
+ b = np.einsum("...", a, optimize=do_opt)
+ assert_(not b.flags['WRITEABLE'])
+
+ # transpose
+ a = np.arange(6)
+ a.shape = (2, 3)
+
+ b = np.einsum("ji", a, optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, a.T)
+
+ b = np.einsum(a, [1, 0], optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, a.T)
+
+ # diagonal
+ a = np.arange(9)
+ a.shape = (3, 3)
+
+ b = np.einsum("ii->i", a, optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [a[i, i] for i in range(3)])
+
+ b = np.einsum(a, [0, 0], [0], optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [a[i, i] for i in range(3)])
+
+ # diagonal with various ways of broadcasting an additional dimension
+ a = np.arange(27)
+ a.shape = (3, 3, 3)
+
+ b = np.einsum("...ii->...i", a, optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [[x[i, i] for i in range(3)] for x in a])
+
+ b = np.einsum(a, [Ellipsis, 0, 0], [Ellipsis, 0], optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [[x[i, i] for i in range(3)] for x in a])
+
+ b = np.einsum("ii...->...i", a, optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [[x[i, i] for i in range(3)]
+ for x in a.transpose(2, 0, 1)])
+
+ b = np.einsum(a, [0, 0, Ellipsis], [Ellipsis, 0], optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [[x[i, i] for i in range(3)]
+ for x in a.transpose(2, 0, 1)])
+
+ b = np.einsum("...ii->i...", a, optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [a[:, i, i] for i in range(3)])
+
+ b = np.einsum(a, [Ellipsis, 0, 0], [0, Ellipsis], optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [a[:, i, i] for i in range(3)])
+
+ b = np.einsum("jii->ij", a, optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [a[:, i, i] for i in range(3)])
+
+ b = np.einsum(a, [1, 0, 0], [0, 1], optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [a[:, i, i] for i in range(3)])
+
+ b = np.einsum("ii...->i...", a, optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)])
+
+ b = np.einsum(a, [0, 0, Ellipsis], [0, Ellipsis], optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)])
+
+ b = np.einsum("i...i->i...", a, optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)])
+
+ b = np.einsum(a, [0, Ellipsis, 0], [0, Ellipsis], optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)])
+
+ b = np.einsum("i...i->...i", a, optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [[x[i, i] for i in range(3)]
+ for x in a.transpose(1, 0, 2)])
+
+ b = np.einsum(a, [0, Ellipsis, 0], [Ellipsis, 0], optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [[x[i, i] for i in range(3)]
+ for x in a.transpose(1, 0, 2)])
+
+ # triple diagonal
+ a = np.arange(27)
+ a.shape = (3, 3, 3)
+
+ b = np.einsum("iii->i", a, optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [a[i, i, i] for i in range(3)])
+
+ b = np.einsum(a, [0, 0, 0], [0], optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [a[i, i, i] for i in range(3)])
+
+ # swap axes
+ a = np.arange(24)
+ a.shape = (2, 3, 4)
+
+ b = np.einsum("ijk->jik", a, optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, a.swapaxes(0, 1))
+
+ b = np.einsum(a, [0, 1, 2], [1, 0, 2], optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, a.swapaxes(0, 1))
+
+ def check_einsum_sums(self, dtype, do_opt=False):
+ # Check various sums. Runs many sizes to exercise the unrolled loops.
+
+ # sum(a, axis=-1)
+ for n in range(1, 17):
+ a = np.arange(n, dtype=dtype)
+ assert_equal(np.einsum("i->", a, optimize=do_opt),
+ np.sum(a, axis=-1).astype(dtype))
+ assert_equal(np.einsum(a, [0], [], optimize=do_opt),
+ np.sum(a, axis=-1).astype(dtype))
+
+ for n in range(1, 17):
+ a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n)
+ assert_equal(np.einsum("...i->...", a, optimize=do_opt),
+ np.sum(a, axis=-1).astype(dtype))
+ assert_equal(np.einsum(a, [Ellipsis, 0], [Ellipsis], optimize=do_opt),
+ np.sum(a, axis=-1).astype(dtype))
+
+ # sum(a, axis=0)
+ for n in range(1, 17):
+ a = np.arange(2*n, dtype=dtype).reshape(2, n)
+ assert_equal(np.einsum("i...->...", a, optimize=do_opt),
+ np.sum(a, axis=0).astype(dtype))
+ assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis], optimize=do_opt),
+ np.sum(a, axis=0).astype(dtype))
+
+ for n in range(1, 17):
+ a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n)
+ assert_equal(np.einsum("i...->...", a, optimize=do_opt),
+ np.sum(a, axis=0).astype(dtype))
+ assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis], optimize=do_opt),
+ np.sum(a, axis=0).astype(dtype))
+
+ # trace(a)
+ for n in range(1, 17):
+ a = np.arange(n*n, dtype=dtype).reshape(n, n)
+ assert_equal(np.einsum("ii", a, optimize=do_opt),
+ np.trace(a).astype(dtype))
+ assert_equal(np.einsum(a, [0, 0], optimize=do_opt),
+ np.trace(a).astype(dtype))
+
+ # multiply(a, b)
+ assert_equal(np.einsum("..., ...", 3, 4), 12) # scalar case
+ for n in range(1, 17):
+ a = np.arange(3 * n, dtype=dtype).reshape(3, n)
+ b = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n)
+ assert_equal(np.einsum("..., ...", a, b, optimize=do_opt),
+ np.multiply(a, b))
+ assert_equal(np.einsum(a, [Ellipsis], b, [Ellipsis], optimize=do_opt),
+ np.multiply(a, b))
+
+ # inner(a,b)
+ for n in range(1, 17):
+ a = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n)
+ b = np.arange(n, dtype=dtype)
+ assert_equal(np.einsum("...i, ...i", a, b, optimize=do_opt), np.inner(a, b))
+ assert_equal(np.einsum(a, [Ellipsis, 0], b, [Ellipsis, 0], optimize=do_opt),
+ np.inner(a, b))
+
+ for n in range(1, 11):
+ a = np.arange(n * 3 * 2, dtype=dtype).reshape(n, 3, 2)
+ b = np.arange(n, dtype=dtype)
+ assert_equal(np.einsum("i..., i...", a, b, optimize=do_opt),
+ np.inner(a.T, b.T).T)
+ assert_equal(np.einsum(a, [0, Ellipsis], b, [0, Ellipsis], optimize=do_opt),
+ np.inner(a.T, b.T).T)
+
+ # outer(a,b)
+ for n in range(1, 17):
+ a = np.arange(3, dtype=dtype)+1
+ b = np.arange(n, dtype=dtype)+1
+ assert_equal(np.einsum("i,j", a, b, optimize=do_opt),
+ np.outer(a, b))
+ assert_equal(np.einsum(a, [0], b, [1], optimize=do_opt),
+ np.outer(a, b))
+
+ # Suppress the complex warnings for the 'as f8' tests
+ with suppress_warnings() as sup:
+ sup.filter(np.ComplexWarning)
+
+ # matvec(a,b) / a.dot(b) where a is matrix, b is vector
+ for n in range(1, 17):
+ a = np.arange(4*n, dtype=dtype).reshape(4, n)
+ b = np.arange(n, dtype=dtype)
+ assert_equal(np.einsum("ij, j", a, b, optimize=do_opt),
+ np.dot(a, b))
+ assert_equal(np.einsum(a, [0, 1], b, [1], optimize=do_opt),
+ np.dot(a, b))
+
+ c = np.arange(4, dtype=dtype)
+ np.einsum("ij,j", a, b, out=c,
+ dtype='f8', casting='unsafe', optimize=do_opt)
+ assert_equal(c,
+ np.dot(a.astype('f8'),
+ b.astype('f8')).astype(dtype))
+ c[...] = 0
+ np.einsum(a, [0, 1], b, [1], out=c,
+ dtype='f8', casting='unsafe', optimize=do_opt)
+ assert_equal(c,
+ np.dot(a.astype('f8'),
+ b.astype('f8')).astype(dtype))
+
+ for n in range(1, 17):
+ a = np.arange(4*n, dtype=dtype).reshape(4, n)
+ b = np.arange(n, dtype=dtype)
+ assert_equal(np.einsum("ji,j", a.T, b.T, optimize=do_opt),
+ np.dot(b.T, a.T))
+ assert_equal(np.einsum(a.T, [1, 0], b.T, [1], optimize=do_opt),
+ np.dot(b.T, a.T))
+
+ c = np.arange(4, dtype=dtype)
+ np.einsum("ji,j", a.T, b.T, out=c,
+ dtype='f8', casting='unsafe', optimize=do_opt)
+ assert_equal(c,
+ np.dot(b.T.astype('f8'),
+ a.T.astype('f8')).astype(dtype))
+ c[...] = 0
+ np.einsum(a.T, [1, 0], b.T, [1], out=c,
+ dtype='f8', casting='unsafe', optimize=do_opt)
+ assert_equal(c,
+ np.dot(b.T.astype('f8'),
+ a.T.astype('f8')).astype(dtype))
+
+ # matmat(a,b) / a.dot(b) where a is matrix, b is matrix
+ for n in range(1, 17):
+ if n < 8 or dtype != 'f2':
+ a = np.arange(4*n, dtype=dtype).reshape(4, n)
+ b = np.arange(n*6, dtype=dtype).reshape(n, 6)
+ assert_equal(np.einsum("ij,jk", a, b, optimize=do_opt),
+ np.dot(a, b))
+ assert_equal(np.einsum(a, [0, 1], b, [1, 2], optimize=do_opt),
+ np.dot(a, b))
+
+ for n in range(1, 17):
+ a = np.arange(4*n, dtype=dtype).reshape(4, n)
+ b = np.arange(n*6, dtype=dtype).reshape(n, 6)
+ c = np.arange(24, dtype=dtype).reshape(4, 6)
+ np.einsum("ij,jk", a, b, out=c, dtype='f8', casting='unsafe',
+ optimize=do_opt)
+ assert_equal(c,
+ np.dot(a.astype('f8'),
+ b.astype('f8')).astype(dtype))
+ c[...] = 0
+ np.einsum(a, [0, 1], b, [1, 2], out=c,
+ dtype='f8', casting='unsafe', optimize=do_opt)
+ assert_equal(c,
+ np.dot(a.astype('f8'),
+ b.astype('f8')).astype(dtype))
+
+ # matrix triple product (note this is not currently an efficient
+ # way to multiply 3 matrices)
+ a = np.arange(12, dtype=dtype).reshape(3, 4)
+ b = np.arange(20, dtype=dtype).reshape(4, 5)
+ c = np.arange(30, dtype=dtype).reshape(5, 6)
+ if dtype != 'f2':
+ assert_equal(np.einsum("ij,jk,kl", a, b, c, optimize=do_opt),
+ a.dot(b).dot(c))
+ assert_equal(np.einsum(a, [0, 1], b, [1, 2], c, [2, 3],
+ optimize=do_opt), a.dot(b).dot(c))
+
+ d = np.arange(18, dtype=dtype).reshape(3, 6)
+ np.einsum("ij,jk,kl", a, b, c, out=d,
+ dtype='f8', casting='unsafe', optimize=do_opt)
+ tgt = a.astype('f8').dot(b.astype('f8'))
+ tgt = tgt.dot(c.astype('f8')).astype(dtype)
+ assert_equal(d, tgt)
+
+ d[...] = 0
+ np.einsum(a, [0, 1], b, [1, 2], c, [2, 3], out=d,
+ dtype='f8', casting='unsafe', optimize=do_opt)
+ tgt = a.astype('f8').dot(b.astype('f8'))
+ tgt = tgt.dot(c.astype('f8')).astype(dtype)
+ assert_equal(d, tgt)
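+
+ # (A sketch of the efficient route, using the np.einsum_path API that
+ # TestEinsumPath below exercises:
+ #     path, desc = np.einsum_path('ij,jk,kl', a, b, c, optimize='greedy')
+ #     np.einsum('ij,jk,kl', a, b, c, optimize=path)
+ # contracts the three matrices pairwise instead of in a single pass.)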
+
+ # tensordot(a, b)
+ if np.dtype(dtype) != np.dtype('f2'):
+ a = np.arange(60, dtype=dtype).reshape(3, 4, 5)
+ b = np.arange(24, dtype=dtype).reshape(4, 3, 2)
+ assert_equal(np.einsum("ijk, jil -> kl", a, b),
+ np.tensordot(a, b, axes=([1, 0], [0, 1])))
+ assert_equal(np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3]),
+ np.tensordot(a, b, axes=([1, 0], [0, 1])))
+
+ c = np.arange(10, dtype=dtype).reshape(5, 2)
+ np.einsum("ijk,jil->kl", a, b, out=c,
+ dtype='f8', casting='unsafe', optimize=do_opt)
+ assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'),
+ axes=([1, 0], [0, 1])).astype(dtype))
+ c[...] = 0
+ np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3], out=c,
+ dtype='f8', casting='unsafe', optimize=do_opt)
+ assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'),
+ axes=([1, 0], [0, 1])).astype(dtype))
+
+ # logical_and(logical_and(a!=0, b!=0), c!=0)
+ a = np.array([1, 3, -2, 0, 12, 13, 0, 1], dtype=dtype)
+ b = np.array([0, 3.5, 0., -2, 0, 1, 3, 12], dtype=dtype)
+ c = np.array([True, True, False, True, True, False, True, True])
+ assert_equal(np.einsum("i,i,i->i", a, b, c,
+ dtype='?', casting='unsafe', optimize=do_opt),
+ np.logical_and(np.logical_and(a != 0, b != 0), c != 0))
+ assert_equal(np.einsum(a, [0], b, [0], c, [0], [0],
+ dtype='?', casting='unsafe'),
+ np.logical_and(np.logical_and(a != 0, b != 0), c != 0))
+
+ a = np.arange(9, dtype=dtype)
+ assert_equal(np.einsum(",i->", 3, a), 3*np.sum(a))
+ assert_equal(np.einsum(3, [], a, [0], []), 3*np.sum(a))
+ assert_equal(np.einsum("i,->", a, 3), 3*np.sum(a))
+ assert_equal(np.einsum(a, [0], 3, [], []), 3*np.sum(a))
+
+ # Various stride0, contiguous, and SSE aligned variants
+ for n in range(1, 25):
+ a = np.arange(n, dtype=dtype)
+ if np.dtype(dtype).itemsize > 1:
+ assert_equal(np.einsum("...,...", a, a, optimize=do_opt),
+ np.multiply(a, a))
+ assert_equal(np.einsum("i,i", a, a, optimize=do_opt), np.dot(a, a))
+ assert_equal(np.einsum("i,->i", a, 2, optimize=do_opt), 2*a)
+ assert_equal(np.einsum(",i->i", 2, a, optimize=do_opt), 2*a)
+ assert_equal(np.einsum("i,->", a, 2, optimize=do_opt), 2*np.sum(a))
+ assert_equal(np.einsum(",i->", 2, a, optimize=do_opt), 2*np.sum(a))
+
+ assert_equal(np.einsum("...,...", a[1:], a[:-1], optimize=do_opt),
+ np.multiply(a[1:], a[:-1]))
+ assert_equal(np.einsum("i,i", a[1:], a[:-1], optimize=do_opt),
+ np.dot(a[1:], a[:-1]))
+ assert_equal(np.einsum("i,->i", a[1:], 2, optimize=do_opt), 2*a[1:])
+ assert_equal(np.einsum(",i->i", 2, a[1:], optimize=do_opt), 2*a[1:])
+ assert_equal(np.einsum("i,->", a[1:], 2, optimize=do_opt),
+ 2*np.sum(a[1:]))
+ assert_equal(np.einsum(",i->", 2, a[1:], optimize=do_opt),
+ 2*np.sum(a[1:]))
+
+ # An object array, summed as the data type
+ a = np.arange(9, dtype=object)
+
+ b = np.einsum("i->", a, dtype=dtype, casting='unsafe')
+ assert_equal(b, np.sum(a))
+ assert_equal(b.dtype, np.dtype(dtype))
+
+ b = np.einsum(a, [0], [], dtype=dtype, casting='unsafe')
+ assert_equal(b, np.sum(a))
+ assert_equal(b.dtype, np.dtype(dtype))
+
+ # A case which was failing (ticket #1885)
+ p = np.arange(2) + 1
+ q = np.arange(4).reshape(2, 2) + 3
+ r = np.arange(4).reshape(2, 2) + 7
+ assert_equal(np.einsum('z,mz,zm->', p, q, r), 253)
+
+ # singleton dimensions broadcast (gh-10343)
+ p = np.ones((10,2))
+ q = np.ones((1,2))
+ assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True),
+ np.einsum('ij,ij->j', p, q, optimize=False))
+ assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True),
+ [10.] * 2)
+
+ # a blas-compatible contraction broadcasting case which was failing
+ # for optimize=True (ticket #10930)
+ x = np.array([2., 3.])
+ y = np.array([4.])
+ assert_array_equal(np.einsum("i, i", x, y, optimize=False), 20.)
+ assert_array_equal(np.einsum("i, i", x, y, optimize=True), 20.)
+
+ # all-ones array was bypassing bug (ticket #10930)
+ p = np.ones((1, 5)) / 2
+ q = np.ones((5, 5)) / 2
+ for optimize in (True, False):
+ assert_array_equal(np.einsum("...ij,...jk->...ik", p, p,
+ optimize=optimize),
+ np.einsum("...ij,...jk->...ik", p, q,
+ optimize=optimize))
+ assert_array_equal(np.einsum("...ij,...jk->...ik", p, q,
+ optimize=optimize),
+ np.full((1, 5), 1.25))
+
+ # Cases which were failing (gh-10899)
+ x = np.eye(2, dtype=dtype)
+ y = np.ones(2, dtype=dtype)
+ assert_array_equal(np.einsum("ji,i->", x, y, optimize=optimize),
+ [2.]) # contig_contig_outstride0_two
+ assert_array_equal(np.einsum("i,ij->", y, x, optimize=optimize),
+ [2.]) # stride0_contig_outstride0_two
+ assert_array_equal(np.einsum("ij,i->", x, y, optimize=optimize),
+ [2.]) # contig_stride0_outstride0_two
+
+ def test_einsum_sums_int8(self):
+ self.check_einsum_sums('i1')
+
+ def test_einsum_sums_uint8(self):
+ self.check_einsum_sums('u1')
+
+ def test_einsum_sums_int16(self):
+ self.check_einsum_sums('i2')
+
+ def test_einsum_sums_uint16(self):
+ self.check_einsum_sums('u2')
+
+ def test_einsum_sums_int32(self):
+ self.check_einsum_sums('i4')
+ self.check_einsum_sums('i4', True)
+
+ def test_einsum_sums_uint32(self):
+ self.check_einsum_sums('u4')
+ self.check_einsum_sums('u4', True)
+
+ def test_einsum_sums_int64(self):
+ self.check_einsum_sums('i8')
+
+ def test_einsum_sums_uint64(self):
+ self.check_einsum_sums('u8')
+
+ def test_einsum_sums_float16(self):
+ self.check_einsum_sums('f2')
+
+ def test_einsum_sums_float32(self):
+ self.check_einsum_sums('f4')
+
+ def test_einsum_sums_float64(self):
+ self.check_einsum_sums('f8')
+ self.check_einsum_sums('f8', True)
+
+ def test_einsum_sums_longdouble(self):
+ self.check_einsum_sums(np.longdouble)
+
+ def test_einsum_sums_cfloat64(self):
+ self.check_einsum_sums('c8')
+ self.check_einsum_sums('c8', True)
+
+ def test_einsum_sums_cfloat128(self):
+ self.check_einsum_sums('c16')
+
+ def test_einsum_sums_clongdouble(self):
+ self.check_einsum_sums(np.clongdouble)
+
+ def test_einsum_misc(self):
+ # This call used to crash because of a bug in
+ # PyArray_AssignZero
+ a = np.ones((1, 2))
+ b = np.ones((2, 2, 1))
+ assert_equal(np.einsum('ij...,j...->i...', a, b), [[[2], [2]]])
+ assert_equal(np.einsum('ij...,j...->i...', a, b, optimize=True), [[[2], [2]]])
+
+ # Regression test for issue #10369 (test unicode inputs with Python 2)
+ assert_equal(np.einsum(u'ij...,j...->i...', a, b), [[[2], [2]]])
+ assert_equal(np.einsum('...i,...i', [1, 2, 3], [2, 3, 4]), 20)
+ assert_equal(np.einsum(u'...i,...i', [1, 2, 3], [2, 3, 4]), 20)
+ assert_equal(np.einsum('...i,...i', [1, 2, 3], [2, 3, 4],
+ optimize=u'greedy'), 20)
+
+ # The iterator had an issue with buffering this reduction
+ a = np.ones((5, 12, 4, 2, 3), np.int64)
+ b = np.ones((5, 12, 11), np.int64)
+ assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b),
+ np.einsum('ijklm,ijn->', a, b))
+ assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b, optimize=True),
+ np.einsum('ijklm,ijn->', a, b, optimize=True))
+
+ # Issue #2027, was a problem in the contiguous 3-argument
+ # inner loop implementation
+ a = np.arange(1, 3)
+ b = np.arange(1, 5).reshape(2, 2)
+ c = np.arange(1, 9).reshape(4, 2)
+ assert_equal(np.einsum('x,yx,zx->xzy', a, b, c),
+ [[[1, 3], [3, 9], [5, 15], [7, 21]],
+ [[8, 16], [16, 32], [24, 48], [32, 64]]])
+ assert_equal(np.einsum('x,yx,zx->xzy', a, b, c, optimize=True),
+ [[[1, 3], [3, 9], [5, 15], [7, 21]],
+ [[8, 16], [16, 32], [24, 48], [32, 64]]])
+
+ def test_subscript_range(self):
+ # Issue #7741: make sure that all letters of the Latin alphabet (both
+ # uppercase and lowercase) can be used when creating a subscript from
+ # arrays
+ a = np.ones((2, 3))
+ b = np.ones((3, 4))
+ np.einsum(a, [0, 20], b, [20, 2], [0, 2], optimize=False)
+ np.einsum(a, [0, 27], b, [27, 2], [0, 2], optimize=False)
+ np.einsum(a, [0, 51], b, [51, 2], [0, 2], optimize=False)
+ assert_raises(ValueError, lambda: np.einsum(a, [0, 52], b, [52, 2], [0, 2], optimize=False))
+ assert_raises(ValueError, lambda: np.einsum(a, [-1, 5], b, [5, 2], [-1, 2], optimize=False))
+
+ def test_einsum_broadcast(self):
+ # Issue #2455 change in handling ellipsis
+ # remove the 'middle broadcast' error
+ # only use the 'RIGHT' iteration in prepare_op_axes
+ # adds auto broadcast on left where it belongs
+ # broadcast on right has to be explicit
+ # We need to test the optimized parsing as well
+
+ A = np.arange(2 * 3 * 4).reshape(2, 3, 4)
+ B = np.arange(3)
+ ref = np.einsum('ijk,j->ijk', A, B, optimize=False)
+ for opt in [True, False]:
+ assert_equal(np.einsum('ij...,j...->ij...', A, B, optimize=opt), ref)
+ assert_equal(np.einsum('ij...,...j->ij...', A, B, optimize=opt), ref)
+ assert_equal(np.einsum('ij...,j->ij...', A, B, optimize=opt), ref) # used to raise error
+
+ A = np.arange(12).reshape((4, 3))
+ B = np.arange(6).reshape((3, 2))
+ ref = np.einsum('ik,kj->ij', A, B, optimize=False)
+ for opt in [True, False]:
+ assert_equal(np.einsum('ik...,k...->i...', A, B, optimize=opt), ref)
+ assert_equal(np.einsum('ik...,...kj->i...j', A, B, optimize=opt), ref)
+ assert_equal(np.einsum('...k,kj', A, B, optimize=opt), ref) # used to raise error
+ assert_equal(np.einsum('ik,k...->i...', A, B, optimize=opt), ref) # used to raise error
+
+ dims = [2, 3, 4, 5]
+ a = np.arange(np.prod(dims)).reshape(dims)
+ v = np.arange(dims[2])
+ ref = np.einsum('ijkl,k->ijl', a, v, optimize=False)
+ for opt in [True, False]:
+ assert_equal(np.einsum('ijkl,k', a, v, optimize=opt), ref)
+ assert_equal(np.einsum('...kl,k', a, v, optimize=opt), ref) # used to raise error
+ assert_equal(np.einsum('...kl,k...', a, v, optimize=opt), ref)
+
+ J, K, M = 160, 160, 120
+ A = np.arange(J * K * M).reshape(1, 1, 1, J, K, M)
+ B = np.arange(J * K * M * 3).reshape(J, K, M, 3)
+ ref = np.einsum('...lmn,...lmno->...o', A, B, optimize=False)
+ for opt in [True, False]:
+ assert_equal(np.einsum('...lmn,lmno->...o', A, B,
+ optimize=opt), ref) # used to raise error
+
+ def test_einsum_fixedstridebug(self):
+ # Issue #4485 obscure einsum bug
+ # This case revealed a bug in nditer where it reported a stride
+ # as 'fixed' (0) when it was in fact not fixed during processing
+ # (0 or 4). The reason for the bug was that the check for a fixed
+ # stride was using the information from the 2D inner loop reuse
+ # to restrict the iteration dimensions it had to validate to be
+ # the same, but that 2D inner loop reuse logic is only triggered
+ # during the buffer copying step, and hence it was invalid to
+ # rely on those values. The fix is to check all the dimensions
+ # of the stride in question, which in the test case reveals that
+ # the stride is not fixed.
+ #
+ # NOTE: This test is triggered by the fact that the default buffersize,
+ # used by einsum, is 8192, and 3*2731 = 8193, is larger than that
+ # and results in a mismatch between the buffering and the
+ # striding for operand A.
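+ # (Concretely: 3 * 2731 = 8193 elements, one more than the 8192-element
+ # default buffer, so operand A cannot be buffered in a single chunk.)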
+ A = np.arange(2 * 3).reshape(2, 3).astype(np.float32)
+ B = np.arange(2 * 3 * 2731).reshape(2, 3, 2731).astype(np.int16)
+ es = np.einsum('cl, cpx->lpx', A, B)
+ tp = np.tensordot(A, B, axes=(0, 0))
+ assert_equal(es, tp)
+ # The following is the original test case from the bug report,
+ # made repeatable by changing random arrays to aranges.
+ A = np.arange(3 * 3).reshape(3, 3).astype(np.float64)
+ B = np.arange(3 * 3 * 64 * 64).reshape(3, 3, 64, 64).astype(np.float32)
+ es = np.einsum('cl, cpxy->lpxy', A, B)
+ tp = np.tensordot(A, B, axes=(0, 0))
+ assert_equal(es, tp)
+
+ def test_einsum_fixed_collapsingbug(self):
+ # Issue #5147.
+ # The bug only occurred when the output argument of einsum was used.
+ x = np.random.normal(0, 1, (5, 5, 5, 5))
+ y1 = np.zeros((5, 5))
+ np.einsum('aabb->ab', x, out=y1)
+ idx = np.arange(5)
+ y2 = x[idx[:, None], idx[:, None], idx, idx]
+ assert_equal(y1, y2)
+
+ def test_einsum_all_contig_non_contig_output(self):
+ # Issue gh-5907, tests that the all contiguous special case
+ # actually checks the contiguity of the output
+ x = np.ones((5, 5))
+ out = np.ones(10)[::2]
+ correct_base = np.ones(10)
+ correct_base[::2] = 5
+ # Always worked (inner iteration is done with 0-stride):
+ np.einsum('mi,mi,mi->m', x, x, x, out=out)
+ assert_array_equal(out.base, correct_base)
+ # Example 1:
+ out = np.ones(10)[::2]
+ np.einsum('im,im,im->m', x, x, x, out=out)
+ assert_array_equal(out.base, correct_base)
+ # Example 2, buffering causes x to be contiguous but
+ # special cases do not catch the operation before:
+ out = np.ones((2, 2, 2))[..., 0]
+ correct_base = np.ones((2, 2, 2))
+ correct_base[..., 0] = 2
+ x = np.ones((2, 2), np.float32)
+ np.einsum('ij,jk->ik', x, x, out=out)
+ assert_array_equal(out.base, correct_base)
+
+ def test_small_boolean_arrays(self):
+ # See gh-5946.
+ # Use array of True embedded in False.
+ a = np.zeros((16, 1, 1), dtype=np.bool_)[:2]
+ a[...] = True
+ out = np.zeros((16, 1, 1), dtype=np.bool_)[:2]
+ tgt = np.ones((2, 1, 1), dtype=np.bool_)
+ res = np.einsum('...ij,...jk->...ik', a, a, out=out)
+ assert_equal(res, tgt)
+
+ def test_out_is_res(self):
+ a = np.arange(9).reshape(3, 3)
+ res = np.einsum('...ij,...jk->...ik', a, a, out=a)
+ assert res is a
+
+ def optimize_compare(self, subscripts, operands=None):
+ # Tests all paths of the optimization function against
+ # conventional einsum
+ if operands is None:
+ args = [subscripts]
+ terms = subscripts.split('->')[0].split(',')
+ for term in terms:
+ dims = [global_size_dict[x] for x in term]
+ args.append(np.random.rand(*dims))
+ else:
+ args = [subscripts] + operands
+
+ noopt = np.einsum(*args, optimize=False)
+ opt = np.einsum(*args, optimize='greedy')
+ assert_almost_equal(opt, noopt)
+ opt = np.einsum(*args, optimize='optimal')
+ assert_almost_equal(opt, noopt)
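+
+ # For example, optimize_compare('ab,bc->ac') draws random operands sized
+ # from global_size_dict and checks that both the 'greedy' and 'optimal'
+ # paths reproduce the unoptimized contraction.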
+
+ def test_hadamard_like_products(self):
+ # Hadamard outer products
+ self.optimize_compare('a,ab,abc->abc')
+ self.optimize_compare('a,b,ab->ab')
+
+ def test_index_transformations(self):
+ # Simple index transformation cases
+ self.optimize_compare('ea,fb,gc,hd,abcd->efgh')
+ self.optimize_compare('ea,fb,abcd,gc,hd->efgh')
+ self.optimize_compare('abcd,ea,fb,gc,hd->efgh')
+
+ def test_complex(self):
+ # Long test cases
+ self.optimize_compare('acdf,jbje,gihb,hfac,gfac,gifabc,hfac')
+ self.optimize_compare('acdf,jbje,gihb,hfac,gfac,gifabc,hfac')
+ self.optimize_compare('cd,bdhe,aidb,hgca,gc,hgibcd,hgac')
+ self.optimize_compare('abhe,hidj,jgba,hiab,gab')
+ self.optimize_compare('bde,cdh,agdb,hica,ibd,hgicd,hiac')
+ self.optimize_compare('chd,bde,agbc,hiad,hgc,hgi,hiad')
+ self.optimize_compare('chd,bde,agbc,hiad,bdi,cgh,agdb')
+ self.optimize_compare('bdhe,acad,hiab,agac,hibd')
+
+ def test_collapse(self):
+ # Inner products
+ self.optimize_compare('ab,ab,c->')
+ self.optimize_compare('ab,ab,c->c')
+ self.optimize_compare('ab,ab,cd,cd->')
+ self.optimize_compare('ab,ab,cd,cd->ac')
+ self.optimize_compare('ab,ab,cd,cd->cd')
+ self.optimize_compare('ab,ab,cd,cd,ef,ef->')
+
+ def test_expand(self):
+ # Outer products
+ self.optimize_compare('ab,cd,ef->abcdef')
+ self.optimize_compare('ab,cd,ef->acdf')
+ self.optimize_compare('ab,cd,de->abcde')
+ self.optimize_compare('ab,cd,de->be')
+ self.optimize_compare('ab,bcd,cd->abcd')
+ self.optimize_compare('ab,bcd,cd->abd')
+
+ def test_edge_cases(self):
+ # Difficult edge cases for optimization
+ self.optimize_compare('eb,cb,fb->cef')
+ self.optimize_compare('dd,fb,be,cdb->cef')
+ self.optimize_compare('bca,cdb,dbf,afc->')
+ self.optimize_compare('dcc,fce,ea,dbf->ab')
+ self.optimize_compare('fdf,cdd,ccd,afe->ae')
+ self.optimize_compare('abcd,ad')
+ self.optimize_compare('ed,fcd,ff,bcf->be')
+ self.optimize_compare('baa,dcf,af,cde->be')
+ self.optimize_compare('bd,db,eac->ace')
+ self.optimize_compare('fff,fae,bef,def->abd')
+ self.optimize_compare('efc,dbc,acf,fd->abe')
+ self.optimize_compare('ba,ac,da->bcd')
+
+ def test_inner_product(self):
+ # Inner products
+ self.optimize_compare('ab,ab')
+ self.optimize_compare('ab,ba')
+ self.optimize_compare('abc,abc')
+ self.optimize_compare('abc,bac')
+ self.optimize_compare('abc,cba')
+
+ def test_random_cases(self):
+ # Randomly built test cases
+ self.optimize_compare('aab,fa,df,ecc->bde')
+ self.optimize_compare('ecb,fef,bad,ed->ac')
+ self.optimize_compare('bcf,bbb,fbf,fc->')
+ self.optimize_compare('bb,ff,be->e')
+ self.optimize_compare('bcb,bb,fc,fff->')
+ self.optimize_compare('fbb,dfd,fc,fc->')
+ self.optimize_compare('afd,ba,cc,dc->bf')
+ self.optimize_compare('adb,bc,fa,cfc->d')
+ self.optimize_compare('bbd,bda,fc,db->acf')
+ self.optimize_compare('dba,ead,cad->bce')
+ self.optimize_compare('aef,fbc,dca->bde')
+
+ def test_combined_views_mapping(self):
+ # gh-10792
+ a = np.arange(9).reshape(1, 1, 3, 1, 3)
+ b = np.einsum('bbcdc->d', a)
+ assert_equal(b, [12])
+
+ def test_broadcasting_dot_cases(self):
+ # Ensures broadcasting cases are not mistaken for GEMM
+
+ a = np.random.rand(1, 5, 4)
+ b = np.random.rand(4, 6)
+ c = np.random.rand(5, 6)
+ d = np.random.rand(10)
+
+ self.optimize_compare('ijk,kl,jl', operands=[a, b, c])
+ self.optimize_compare('ijk,kl,jl,i->i', operands=[a, b, c, d])
+
+ e = np.random.rand(1, 1, 5, 4)
+ f = np.random.rand(7, 7)
+ self.optimize_compare('abjk,kl,jl', operands=[e, b, c])
+ self.optimize_compare('abjk,kl,jl,ab->ab', operands=[e, b, c, f])
+
+ # Edge case found in gh-11308
+ g = np.arange(64).reshape(2, 4, 8)
+ self.optimize_compare('obk,ijk->ioj', operands=[g, g])
+
+
+class TestEinsumPath(object):
+ def build_operands(self, string, size_dict=global_size_dict):
+
+ # Builds views based off initial operands
+ operands = [string]
+ terms = string.split('->')[0].split(',')
+ for term in terms:
+ dims = [size_dict[x] for x in term]
+ operands.append(np.random.rand(*dims))
+
+ return operands
+
+ def assert_path_equal(self, comp, benchmark):
+ # Checks if list of tuples are equivalent
+ ret = (len(comp) == len(benchmark))
+ assert_(ret)
+ for pos in range(len(comp) - 1):
+ ret &= isinstance(comp[pos + 1], tuple)
+ ret &= (comp[pos + 1] == benchmark[pos + 1])
+ assert_(ret)
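+
+ # (A path has the form ['einsum_path', (0, 1), (0, 1)]: the label followed
+ # by the pair of operand indices contracted at each step, matching what
+ # np.einsum_path returns.)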
+
+ def test_memory_contraints(self):
+ # Ensure memory constraints are satisfied
+
+ outer_test = self.build_operands('a,b,c->abc')
+
+ path, path_str = np.einsum_path(*outer_test, optimize=('greedy', 0))
+ self.assert_path_equal(path, ['einsum_path', (0, 1, 2)])
+
+ path, path_str = np.einsum_path(*outer_test, optimize=('optimal', 0))
+ self.assert_path_equal(path, ['einsum_path', (0, 1, 2)])
+
+ long_test = self.build_operands('acdf,jbje,gihb,hfac')
+ path, path_str = np.einsum_path(*long_test, optimize=('greedy', 0))
+ self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)])
+
+ path, path_str = np.einsum_path(*long_test, optimize=('optimal', 0))
+ self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)])
+
+ def test_long_paths(self):
+ # Long complex cases
+
+ # Long test 1
+ long_test1 = self.build_operands('acdf,jbje,gihb,hfac,gfac,gifabc,hfac')
+ path, path_str = np.einsum_path(*long_test1, optimize='greedy')
+ self.assert_path_equal(path, ['einsum_path',
+ (3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)])
+
+ path, path_str = np.einsum_path(*long_test1, optimize='optimal')
+ self.assert_path_equal(path, ['einsum_path',
+ (3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)])
+
+ # Long test 2
+ long_test2 = self.build_operands('chd,bde,agbc,hiad,bdi,cgh,agdb')
+ path, path_str = np.einsum_path(*long_test2, optimize='greedy')
+ print(path)
+ self.assert_path_equal(path, ['einsum_path',
+ (3, 4), (0, 3), (3, 4), (1, 3), (1, 2), (0, 1)])
+
+ path, path_str = np.einsum_path(*long_test2, optimize='optimal')
+ print(path)
+ self.assert_path_equal(path, ['einsum_path',
+ (0, 5), (1, 4), (3, 4), (1, 3), (1, 2), (0, 1)])
+
+ def test_edge_paths(self):
+ # Difficult edge cases
+
+ # Edge test1
+ edge_test1 = self.build_operands('eb,cb,fb->cef')
+ path, path_str = np.einsum_path(*edge_test1, optimize='greedy')
+ self.assert_path_equal(path, ['einsum_path', (0, 2), (0, 1)])
+
+ path, path_str = np.einsum_path(*edge_test1, optimize='optimal')
+ self.assert_path_equal(path, ['einsum_path', (0, 2), (0, 1)])
+
+ # Edge test2
+ edge_test2 = self.build_operands('dd,fb,be,cdb->cef')
+ path, path_str = np.einsum_path(*edge_test2, optimize='greedy')
+ self.assert_path_equal(path, ['einsum_path', (0, 3), (0, 1), (0, 1)])
+
+ path, path_str = np.einsum_path(*edge_test2, optimize='optimal')
+ self.assert_path_equal(path, ['einsum_path', (0, 3), (0, 1), (0, 1)])
+
+ # Edge test3
+ edge_test3 = self.build_operands('bca,cdb,dbf,afc->')
+ path, path_str = np.einsum_path(*edge_test3, optimize='greedy')
+ self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)])
+
+ path, path_str = np.einsum_path(*edge_test3, optimize='optimal')
+ self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)])
+
+ # Edge test4
+ edge_test4 = self.build_operands('dcc,fce,ea,dbf->ab')
+ path, path_str = np.einsum_path(*edge_test4, optimize='greedy')
+ self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 1), (0, 1)])
+
+ path, path_str = np.einsum_path(*edge_test4, optimize='optimal')
+ self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)])
+
+ # Edge test5
+ edge_test4 = self.build_operands('a,ac,ab,ad,cd,bd,bc->',
+ size_dict={"a": 20, "b": 20, "c": 20, "d": 20})
+ path, path_str = np.einsum_path(*edge_test4, optimize='greedy')
+ self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)])
+
+ path, path_str = np.einsum_path(*edge_test4, optimize='optimal')
+ self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)])
+
+ def test_path_type_input(self):
+ # Test explicit path handling
+ path_test = self.build_operands('dcc,fce,ea,dbf->ab')
+
+ path, path_str = np.einsum_path(*path_test, optimize=False)
+ self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)])
+
+ path, path_str = np.einsum_path(*path_test, optimize=True)
+ self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 1), (0, 1)])
+
+ exp_path = ['einsum_path', (0, 2), (0, 2), (0, 1)]
+ path, path_str = np.einsum_path(*path_test, optimize=exp_path)
+ self.assert_path_equal(path, exp_path)
+
+ # Double check einsum works on the input path
+ noopt = np.einsum(*path_test, optimize=False)
+ opt = np.einsum(*path_test, optimize=exp_path)
+ assert_almost_equal(noopt, opt)
+
+ def test_spaces(self):
+ #gh-10794
+ arr = np.array([[1]])
+ for sp in itertools.product(['', ' '], repeat=4):
+ # no error for any spacing
+ np.einsum('{}...a{}->{}...a{}'.format(*sp), arr)
+
+def test_overlap():
+ a = np.arange(9, dtype=int).reshape(3, 3)
+ b = np.arange(9, dtype=int).reshape(3, 3)
+ d = np.dot(a, b)
+ # sanity check
+ c = np.einsum('ij,jk->ik', a, b)
+ assert_equal(c, d)
+ #gh-10080, out overlaps one of the operands
+ c = np.einsum('ij,jk->ik', a, b, out=b)
+ assert_equal(c, d)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_einsum.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_einsum.pyc
new file mode 100644
index 0000000..d1232e0
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_einsum.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_errstate.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_errstate.py
new file mode 100644
index 0000000..670d485
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_errstate.py
@@ -0,0 +1,41 @@
+from __future__ import division, absolute_import, print_function
+
+import platform
+import pytest
+
+import numpy as np
+from numpy.testing import assert_, assert_raises
+
+
+class TestErrstate(object):
+ @pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.")
+ def test_invalid(self):
+ with np.errstate(all='raise', under='ignore'):
+ a = -np.arange(3)
+ # This should work
+ with np.errstate(invalid='ignore'):
+ np.sqrt(a)
+ # While this should fail!
+ with assert_raises(FloatingPointError):
+ np.sqrt(a)
+
+ def test_divide(self):
+ with np.errstate(all='raise', under='ignore'):
+ a = -np.arange(3)
+ # This should work
+ with np.errstate(divide='ignore'):
+ a // 0
+ # While this should fail!
+ with assert_raises(FloatingPointError):
+ a // 0
+
+ def test_errcall(self):
+ def foo(*args):
+ print(args)
+
+ olderrcall = np.geterrcall()
+ with np.errstate(call=foo):
+ assert_(np.geterrcall() is foo, 'call is not foo')
+ with np.errstate(call=None):
+ assert_(np.geterrcall() is None, 'call is not None')
+ assert_(np.geterrcall() is olderrcall, 'call is not olderrcall')
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_errstate.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_errstate.pyc
new file mode 100644
index 0000000..64952ea
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_errstate.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_extint128.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_extint128.py
new file mode 100644
index 0000000..7c454a6
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_extint128.py
@@ -0,0 +1,221 @@
+from __future__ import division, absolute_import, print_function
+
+import itertools
+import contextlib
+import operator
+import pytest
+
+import numpy as np
+import numpy.core._multiarray_tests as mt
+
+from numpy.testing import assert_raises, assert_equal
+
+
+INT64_MAX = np.iinfo(np.int64).max
+INT64_MIN = np.iinfo(np.int64).min
+INT64_MID = 2**32
+
+# int128 is not two's complement, the sign bit is separate
+INT128_MAX = 2**128 - 1
+INT128_MIN = -INT128_MAX
+INT128_MID = 2**64
+
+INT64_VALUES = (
+ [INT64_MIN + j for j in range(20)] +
+ [INT64_MAX - j for j in range(20)] +
+ [INT64_MID + j for j in range(-20, 20)] +
+ [2*INT64_MID + j for j in range(-20, 20)] +
+ [INT64_MID//2 + j for j in range(-20, 20)] +
+ list(range(-70, 70))
+)
+
+INT128_VALUES = (
+ [INT128_MIN + j for j in range(20)] +
+ [INT128_MAX - j for j in range(20)] +
+ [INT128_MID + j for j in range(-20, 20)] +
+ [2*INT128_MID + j for j in range(-20, 20)] +
+ [INT128_MID//2 + j for j in range(-20, 20)] +
+ list(range(-70, 70)) +
+ [False] # negative zero
+)
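+
+# (False compares equal to 0; since this int128 format keeps the sign bit
+# separate, it presumably exercises the "-0" representation alongside the
+# plain zeros already present in range(-70, 70).)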
+
+INT64_POS_VALUES = [x for x in INT64_VALUES if x > 0]
+
+
+@contextlib.contextmanager
+def exc_iter(*args):
+ """
+ Iterate over the Cartesian product of *args; if an exception is raised,
+ prepend information about the current iterate to the error.
+ """
+
+ value = [None]
+
+ def iterate():
+ for v in itertools.product(*args):
+ value[0] = v
+ yield v
+
+ try:
+ yield iterate()
+ except Exception:
+ import traceback
+ msg = "At: %r\n%s" % (repr(value[0]),
+ traceback.format_exc())
+ raise AssertionError(msg)
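+
+
+# Illustrative sketch (commented out so it never runs): a failure inside the
+# loop is re-raised with the offending iterate attached, pinpointing the
+# failing combination, e.g.
+#
+#     with exc_iter([1, 2], [3, 4]) as it:
+#         for a, b in it:
+#             assert a + b < 6   # reports "At: (2, 4)" before the traceback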
+
+
+def test_safe_binop():
+ # Test checked arithmetic routines
+
+ ops = [
+ (operator.add, 1),
+ (operator.sub, 2),
+ (operator.mul, 3)
+ ]
+
+ with exc_iter(ops, INT64_VALUES, INT64_VALUES) as it:
+ for xop, a, b in it:
+ pyop, op = xop
+ c = pyop(a, b)
+
+ if not (INT64_MIN <= c <= INT64_MAX):
+ assert_raises(OverflowError, mt.extint_safe_binop, a, b, op)
+ else:
+ d = mt.extint_safe_binop(a, b, op)
+ if c != d:
+ # assert_equal is slow
+ assert_equal(d, c)
+
+
+def test_to_128():
+ with exc_iter(INT64_VALUES) as it:
+ for a, in it:
+ b = mt.extint_to_128(a)
+ if a != b:
+ assert_equal(b, a)
+
+
+def test_to_64():
+ with exc_iter(INT128_VALUES) as it:
+ for a, in it:
+ if not (INT64_MIN <= a <= INT64_MAX):
+ assert_raises(OverflowError, mt.extint_to_64, a)
+ else:
+ b = mt.extint_to_64(a)
+ if a != b:
+ assert_equal(b, a)
+
+
+def test_mul_64_64():
+ with exc_iter(INT64_VALUES, INT64_VALUES) as it:
+ for a, b in it:
+ c = a * b
+ d = mt.extint_mul_64_64(a, b)
+ if c != d:
+ assert_equal(d, c)
+
+
+def test_add_128():
+ with exc_iter(INT128_VALUES, INT128_VALUES) as it:
+ for a, b in it:
+ c = a + b
+ if not (INT128_MIN <= c <= INT128_MAX):
+ assert_raises(OverflowError, mt.extint_add_128, a, b)
+ else:
+ d = mt.extint_add_128(a, b)
+ if c != d:
+ assert_equal(d, c)
+
+
+def test_sub_128():
+ with exc_iter(INT128_VALUES, INT128_VALUES) as it:
+ for a, b in it:
+ c = a - b
+ if not (INT128_MIN <= c <= INT128_MAX):
+ assert_raises(OverflowError, mt.extint_sub_128, a, b)
+ else:
+ d = mt.extint_sub_128(a, b)
+ if c != d:
+ assert_equal(d, c)
+
+
+def test_neg_128():
+ with exc_iter(INT128_VALUES) as it:
+ for a, in it:
+ b = -a
+ c = mt.extint_neg_128(a)
+ if b != c:
+ assert_equal(c, b)
+
+
+def test_shl_128():
+ with exc_iter(INT128_VALUES) as it:
+ for a, in it:
+ if a < 0:
+ b = -(((-a) << 1) & (2**128-1))
+ else:
+ b = (a << 1) & (2**128-1)
+ c = mt.extint_shl_128(a)
+ if b != c:
+ assert_equal(c, b)
+
+
+def test_shr_128():
+ with exc_iter(INT128_VALUES) as it:
+ for a, in it:
+ if a < 0:
+ b = -((-a) >> 1)
+ else:
+ b = a >> 1
+ c = mt.extint_shr_128(a)
+ if b != c:
+ assert_equal(c, b)
+
+
+def test_gt_128():
+ with exc_iter(INT128_VALUES, INT128_VALUES) as it:
+ for a, b in it:
+ c = a > b
+ d = mt.extint_gt_128(a, b)
+ if c != d:
+ assert_equal(d, c)
+
+
+@pytest.mark.slow
+def test_divmod_128_64():
+ with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it:
+ for a, b in it:
+ if a >= 0:
+ c, cr = divmod(a, b)
+ else:
+ c, cr = divmod(-a, b)
+ c = -c
+ cr = -cr
+
+ d, dr = mt.extint_divmod_128_64(a, b)
+
+ if c != d or d != dr or b*d + dr != a:
+ assert_equal(d, c)
+ assert_equal(dr, cr)
+ assert_equal(b*d + dr, a)
+
+
+def test_floordiv_128_64():
+ with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it:
+ for a, b in it:
+ c = a // b
+ d = mt.extint_floordiv_128_64(a, b)
+
+ if c != d:
+ assert_equal(d, c)
+
+
+def test_ceildiv_128_64():
+ with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it:
+ for a, b in it:
+ c = (a + b - 1) // b
+ d = mt.extint_ceildiv_128_64(a, b)
+
+ if c != d:
+ assert_equal(d, c)
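+
+# ((a + b - 1) // b is the standard integer ceiling of a / b for b > 0 under
+# floor division, even for negative a, which is why these divmod, floordiv,
+# and ceildiv tests restrict b to the positive values in INT64_POS_VALUES.)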
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_extint128.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_extint128.pyc
new file mode 100644
index 0000000..77946a7
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_extint128.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_function_base.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_function_base.py
new file mode 100644
index 0000000..459baca
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_function_base.py
@@ -0,0 +1,364 @@
+from __future__ import division, absolute_import, print_function
+
+from numpy import (
+ logspace, linspace, geomspace, dtype, array, sctypes, arange, isnan,
+ ndarray, sqrt, nextafter, stack
+ )
+from numpy.testing import (
+ assert_, assert_equal, assert_raises, assert_array_equal, assert_allclose,
+ suppress_warnings
+ )
+
+
+class PhysicalQuantity(float):
+ def __new__(cls, value):
+ return float.__new__(cls, value)
+
+ def __add__(self, x):
+ assert_(isinstance(x, PhysicalQuantity))
+ return PhysicalQuantity(float(x) + float(self))
+ __radd__ = __add__
+
+ def __sub__(self, x):
+ assert_(isinstance(x, PhysicalQuantity))
+ return PhysicalQuantity(float(self) - float(x))
+
+ def __rsub__(self, x):
+ assert_(isinstance(x, PhysicalQuantity))
+ return PhysicalQuantity(float(x) - float(self))
+
+ def __mul__(self, x):
+ return PhysicalQuantity(float(x) * float(self))
+ __rmul__ = __mul__
+
+ def __div__(self, x):
+ return PhysicalQuantity(float(self) / float(x))
+
+ def __rdiv__(self, x):
+ return PhysicalQuantity(float(x) / float(self))
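+
+
+# PhysicalQuantity asserts that addition and subtraction only ever mix
+# PhysicalQuantity operands; the test_physical_quantities cases below are
+# presumably meant to check that logspace and geomspace accept such float
+# subclasses and compute through the operands' own arithmetic.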
+
+
+class PhysicalQuantity2(ndarray):
+ __array_priority__ = 10
+
+
+class TestLogspace(object):
+
+ def test_basic(self):
+ y = logspace(0, 6)
+ assert_(len(y) == 50)
+ y = logspace(0, 6, num=100)
+ assert_(y[-1] == 10 ** 6)
+ y = logspace(0, 6, endpoint=0)
+ assert_(y[-1] < 10 ** 6)
+ y = logspace(0, 6, num=7)
+ assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6])
+
+ def test_start_stop_array(self):
+ start = array([0., 1.])
+ stop = array([6., 7.])
+ t1 = logspace(start, stop, 6)
+ t2 = stack([logspace(_start, _stop, 6)
+ for _start, _stop in zip(start, stop)], axis=1)
+ assert_equal(t1, t2)
+ t3 = logspace(start, stop[0], 6)
+ t4 = stack([logspace(_start, stop[0], 6)
+ for _start in start], axis=1)
+ assert_equal(t3, t4)
+ t5 = logspace(start, stop, 6, axis=-1)
+ assert_equal(t5, t2.T)
+
+ def test_dtype(self):
+ y = logspace(0, 6, dtype='float32')
+ assert_equal(y.dtype, dtype('float32'))
+ y = logspace(0, 6, dtype='float64')
+ assert_equal(y.dtype, dtype('float64'))
+ y = logspace(0, 6, dtype='int32')
+ assert_equal(y.dtype, dtype('int32'))
+
+ def test_physical_quantities(self):
+ a = PhysicalQuantity(1.0)
+ b = PhysicalQuantity(5.0)
+ assert_equal(logspace(a, b), logspace(1.0, 5.0))
+
+ def test_subclass(self):
+ a = array(1).view(PhysicalQuantity2)
+ b = array(7).view(PhysicalQuantity2)
+ ls = logspace(a, b)
+ assert type(ls) is PhysicalQuantity2
+ assert_equal(ls, logspace(1.0, 7.0))
+ ls = logspace(a, b, 1)
+ assert type(ls) is PhysicalQuantity2
+ assert_equal(ls, logspace(1.0, 7.0, 1))
+
+
+class TestGeomspace(object):
+
+ def test_basic(self):
+ y = geomspace(1, 1e6)
+ assert_(len(y) == 50)
+ y = geomspace(1, 1e6, num=100)
+ assert_(y[-1] == 10 ** 6)
+ y = geomspace(1, 1e6, endpoint=False)
+ assert_(y[-1] < 10 ** 6)
+ y = geomspace(1, 1e6, num=7)
+ assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6])
+
+ y = geomspace(8, 2, num=3)
+ assert_allclose(y, [8, 4, 2])
+ assert_array_equal(y.imag, 0)
+
+ y = geomspace(-1, -100, num=3)
+ assert_array_equal(y, [-1, -10, -100])
+ assert_array_equal(y.imag, 0)
+
+ y = geomspace(-100, -1, num=3)
+ assert_array_equal(y, [-100, -10, -1])
+ assert_array_equal(y.imag, 0)
+
+ def test_complex(self):
+ # Purely imaginary
+ y = geomspace(1j, 16j, num=5)
+ assert_allclose(y, [1j, 2j, 4j, 8j, 16j])
+ assert_array_equal(y.real, 0)
+
+ y = geomspace(-4j, -324j, num=5)
+ assert_allclose(y, [-4j, -12j, -36j, -108j, -324j])
+ assert_array_equal(y.real, 0)
+
+ y = geomspace(1+1j, 1000+1000j, num=4)
+ assert_allclose(y, [1+1j, 10+10j, 100+100j, 1000+1000j])
+
+ y = geomspace(-1+1j, -1000+1000j, num=4)
+ assert_allclose(y, [-1+1j, -10+10j, -100+100j, -1000+1000j])
+
+ # Logarithmic spirals
+ y = geomspace(-1, 1, num=3, dtype=complex)
+ assert_allclose(y, [-1, 1j, +1])
+
+ y = geomspace(0+3j, -3+0j, 3)
+ assert_allclose(y, [0+3j, -3/sqrt(2)+3j/sqrt(2), -3+0j])
+ y = geomspace(0+3j, 3+0j, 3)
+ assert_allclose(y, [0+3j, 3/sqrt(2)+3j/sqrt(2), 3+0j])
+ y = geomspace(-3+0j, 0-3j, 3)
+ assert_allclose(y, [-3+0j, -3/sqrt(2)-3j/sqrt(2), 0-3j])
+ y = geomspace(0+3j, -3+0j, 3)
+ assert_allclose(y, [0+3j, -3/sqrt(2)+3j/sqrt(2), -3+0j])
+ y = geomspace(-2-3j, 5+7j, 7)
+ assert_allclose(y, [-2-3j, -0.29058977-4.15771027j,
+ 2.08885354-4.34146838j, 4.58345529-3.16355218j,
+ 6.41401745-0.55233457j, 6.75707386+3.11795092j,
+ 5+7j])
+
+ # Type promotion should prevent the -5 from becoming a NaN
+ y = geomspace(3j, -5, 2)
+ assert_allclose(y, [3j, -5])
+ y = geomspace(-5, 3j, 2)
+ assert_allclose(y, [-5, 3j])
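+
+        # Editor's gloss (conceptual sketch, not part of the upstream file):
+        # for nonzero endpoints geomspace is essentially a uniform walk in
+        # log space, roughly
+        #
+        #     exp(linspace(log(start), log(stop), num))
+        #
+        # plus bookkeeping for signs and complex phase; promoting mixed
+        # real/imaginary endpoints to complex, as asserted just above, is
+        # what keeps log of a negative real from turning into NaN.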
+
+ def test_dtype(self):
+ y = geomspace(1, 1e6, dtype='float32')
+ assert_equal(y.dtype, dtype('float32'))
+ y = geomspace(1, 1e6, dtype='float64')
+ assert_equal(y.dtype, dtype('float64'))
+ y = geomspace(1, 1e6, dtype='int32')
+ assert_equal(y.dtype, dtype('int32'))
+
+ # Native types
+ y = geomspace(1, 1e6, dtype=float)
+ assert_equal(y.dtype, dtype('float_'))
+ y = geomspace(1, 1e6, dtype=complex)
+ assert_equal(y.dtype, dtype('complex'))
+
+ def test_start_stop_array_scalar(self):
+ lim1 = array([120, 100], dtype="int8")
+ lim2 = array([-120, -100], dtype="int8")
+ lim3 = array([1200, 1000], dtype="uint16")
+ t1 = geomspace(lim1[0], lim1[1], 5)
+ t2 = geomspace(lim2[0], lim2[1], 5)
+ t3 = geomspace(lim3[0], lim3[1], 5)
+ t4 = geomspace(120.0, 100.0, 5)
+ t5 = geomspace(-120.0, -100.0, 5)
+ t6 = geomspace(1200.0, 1000.0, 5)
+
+ # t3 uses float32, t6 uses float64
+ assert_allclose(t1, t4, rtol=1e-2)
+ assert_allclose(t2, t5, rtol=1e-2)
+ assert_allclose(t3, t6, rtol=1e-5)
+
+ def test_start_stop_array(self):
+ # Try to use all special cases.
+ start = array([1.e0, 32., 1j, -4j, 1+1j, -1])
+ stop = array([1.e4, 2., 16j, -324j, 10000+10000j, 1])
+ t1 = geomspace(start, stop, 5)
+ t2 = stack([geomspace(_start, _stop, 5)
+ for _start, _stop in zip(start, stop)], axis=1)
+ assert_equal(t1, t2)
+ t3 = geomspace(start, stop[0], 5)
+ t4 = stack([geomspace(_start, stop[0], 5)
+ for _start in start], axis=1)
+ assert_equal(t3, t4)
+ t5 = geomspace(start, stop, 5, axis=-1)
+ assert_equal(t5, t2.T)
+
+ def test_physical_quantities(self):
+ a = PhysicalQuantity(1.0)
+ b = PhysicalQuantity(5.0)
+ assert_equal(geomspace(a, b), geomspace(1.0, 5.0))
+
+ def test_subclass(self):
+ a = array(1).view(PhysicalQuantity2)
+ b = array(7).view(PhysicalQuantity2)
+ gs = geomspace(a, b)
+ assert type(gs) is PhysicalQuantity2
+ assert_equal(gs, geomspace(1.0, 7.0))
+ gs = geomspace(a, b, 1)
+ assert type(gs) is PhysicalQuantity2
+ assert_equal(gs, geomspace(1.0, 7.0, 1))
+
+ def test_bounds(self):
+ assert_raises(ValueError, geomspace, 0, 10)
+ assert_raises(ValueError, geomspace, 10, 0)
+ assert_raises(ValueError, geomspace, 0, 0)
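+        # Editor's gloss: 0 is rejected because no geometric progression can
+        # pass through it -- there is no finite ratio whose powers reach 0.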
+
+
+class TestLinspace(object):
+
+ def test_basic(self):
+ y = linspace(0, 10)
+ assert_(len(y) == 50)
+ y = linspace(2, 10, num=100)
+ assert_(y[-1] == 10)
+ y = linspace(2, 10, endpoint=0)
+ assert_(y[-1] < 10)
+ assert_raises(ValueError, linspace, 0, 10, num=-1)
+
+ def test_corner(self):
+ y = list(linspace(0, 1, 1))
+ assert_(y == [0.0], y)
+ with suppress_warnings() as sup:
+ sup.filter(DeprecationWarning, ".*safely interpreted as an integer")
+ y = list(linspace(0, 1, 2.5))
+ assert_(y == [0.0, 1.0])
+
+ def test_type(self):
+ t1 = linspace(0, 1, 0).dtype
+ t2 = linspace(0, 1, 1).dtype
+ t3 = linspace(0, 1, 2).dtype
+ assert_equal(t1, t2)
+ assert_equal(t2, t3)
+
+ def test_dtype(self):
+ y = linspace(0, 6, dtype='float32')
+ assert_equal(y.dtype, dtype('float32'))
+ y = linspace(0, 6, dtype='float64')
+ assert_equal(y.dtype, dtype('float64'))
+ y = linspace(0, 6, dtype='int32')
+ assert_equal(y.dtype, dtype('int32'))
+
+ def test_start_stop_array_scalar(self):
+ lim1 = array([-120, 100], dtype="int8")
+ lim2 = array([120, -100], dtype="int8")
+ lim3 = array([1200, 1000], dtype="uint16")
+ t1 = linspace(lim1[0], lim1[1], 5)
+ t2 = linspace(lim2[0], lim2[1], 5)
+ t3 = linspace(lim3[0], lim3[1], 5)
+ t4 = linspace(-120.0, 100.0, 5)
+ t5 = linspace(120.0, -100.0, 5)
+ t6 = linspace(1200.0, 1000.0, 5)
+ assert_equal(t1, t4)
+ assert_equal(t2, t5)
+ assert_equal(t3, t6)
+
+ def test_start_stop_array(self):
+ start = array([-120, 120], dtype="int8")
+ stop = array([100, -100], dtype="int8")
+ t1 = linspace(start, stop, 5)
+ t2 = stack([linspace(_start, _stop, 5)
+ for _start, _stop in zip(start, stop)], axis=1)
+ assert_equal(t1, t2)
+ t3 = linspace(start, stop[0], 5)
+ t4 = stack([linspace(_start, stop[0], 5)
+ for _start in start], axis=1)
+ assert_equal(t3, t4)
+ t5 = linspace(start, stop, 5, axis=-1)
+ assert_equal(t5, t2.T)
+
+ def test_complex(self):
+ lim1 = linspace(1 + 2j, 3 + 4j, 5)
+ t1 = array([1.0+2.j, 1.5+2.5j, 2.0+3j, 2.5+3.5j, 3.0+4j])
+ lim2 = linspace(1j, 10, 5)
+ t2 = array([0.0+1.j, 2.5+0.75j, 5.0+0.5j, 7.5+0.25j, 10.0+0j])
+ assert_equal(lim1, t1)
+ assert_equal(lim2, t2)
+
+ def test_physical_quantities(self):
+ a = PhysicalQuantity(0.0)
+ b = PhysicalQuantity(1.0)
+ assert_equal(linspace(a, b), linspace(0.0, 1.0))
+
+ def test_subclass(self):
+ a = array(0).view(PhysicalQuantity2)
+ b = array(1).view(PhysicalQuantity2)
+ ls = linspace(a, b)
+ assert type(ls) is PhysicalQuantity2
+ assert_equal(ls, linspace(0.0, 1.0))
+ ls = linspace(a, b, 1)
+ assert type(ls) is PhysicalQuantity2
+ assert_equal(ls, linspace(0.0, 1.0, 1))
+
+ def test_array_interface(self):
+ # Regression test for https://github.com/numpy/numpy/pull/6659
+ # Ensure that start/stop can be objects that implement
+ # __array_interface__ and are convertible to numeric scalars
+
+ class Arrayish(object):
+ """
+            A generic object that supports the __array_interface__ and hence
+            can in principle be converted to a numeric scalar, but is not
+            otherwise recognized as numeric; it also happens to support
+            multiplication by floats.
+
+ Data should be an object that implements the buffer interface,
+ and contains at least 4 bytes.
+ """
+
+ def __init__(self, data):
+ self._data = data
+
+ @property
+ def __array_interface__(self):
+                return {'shape': (), 'typestr': '<f4', 'data': self._data,
+                        'version': 3}

[... extraction gap: the remainder of test_function_base.py and most of
test_getlimits.py were swallowed here as a spurious "<...>" tag; the dict
line above is restored from the upstream numpy source, and the surviving
tail below closes test_getlimits.py's finfo plausibility check ...]

+        assert_(info.nmant > 1)
+        assert_(info.minexp < -1)
+        assert_(info.maxexp > 1)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_getlimits.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_getlimits.pyc
new file mode 100644
index 0000000..c428a4d
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_getlimits.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_half.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_half.py
new file mode 100644
index 0000000..7707125
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_half.py
@@ -0,0 +1,518 @@
+from __future__ import division, absolute_import, print_function
+
+import platform
+import pytest
+
+import numpy as np
+from numpy import uint16, float16, float32, float64
+from numpy.testing import assert_, assert_equal
+
+
+def assert_raises_fpe(strmatch, callable, *args, **kwargs):
+ try:
+ callable(*args, **kwargs)
+ except FloatingPointError as exc:
+ assert_(str(exc).find(strmatch) >= 0,
+ "Did not raise floating point %s error" % strmatch)
+ else:
+ assert_(False,
+ "Did not raise floating point %s error" % strmatch)
+
+class TestHalf(object):
+ def setup(self):
+ # An array of all possible float16 values
+ self.all_f16 = np.arange(0x10000, dtype=uint16)
+ self.all_f16.dtype = float16
+ self.all_f32 = np.array(self.all_f16, dtype=float32)
+ self.all_f64 = np.array(self.all_f16, dtype=float64)
+
+ # An array of all non-NaN float16 values, in sorted order
+ self.nonan_f16 = np.concatenate(
+ (np.arange(0xfc00, 0x7fff, -1, dtype=uint16),
+ np.arange(0x0000, 0x7c01, 1, dtype=uint16)))
+ self.nonan_f16.dtype = float16
+ self.nonan_f32 = np.array(self.nonan_f16, dtype=float32)
+ self.nonan_f64 = np.array(self.nonan_f16, dtype=float64)
+
+ # An array of all finite float16 values, in sorted order
+ self.finite_f16 = self.nonan_f16[1:-1]
+ self.finite_f32 = self.nonan_f32[1:-1]
+ self.finite_f64 = self.nonan_f64[1:-1]
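+
+        # Editor's gloss (illustrative, not part of the upstream file):
+        # float16 has only 2**16 == 65536 bit patterns, so these tests can
+        # enumerate every value exhaustively.  Assigning to ``.dtype`` above
+        # reinterprets the uint16 buffer in place rather than converting it,
+        # e.g. the pattern 0x3c00 reinterprets to float16(1.0).  The two
+        # arange calls walk the negative patterns downward (0xfc00 == -inf
+        # .. 0x8000 == -0.0) and the positive ones upward (0x0000 == +0.0
+        # .. 0x7c00 == +inf), giving all non-NaN values in ascending order.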
+
+ def test_half_conversions(self):
+ """Checks that all 16-bit values survive conversion
+ to/from 32-bit and 64-bit float"""
+ # Because the underlying routines preserve the NaN bits, every
+ # value is preserved when converting to/from other floats.
+
+ # Convert from float32 back to float16
+ b = np.array(self.all_f32, dtype=float16)
+ assert_equal(self.all_f16.view(dtype=uint16),
+ b.view(dtype=uint16))
+
+ # Convert from float64 back to float16
+ b = np.array(self.all_f64, dtype=float16)
+ assert_equal(self.all_f16.view(dtype=uint16),
+ b.view(dtype=uint16))
+
+ # Convert float16 to longdouble and back
+ # This doesn't necessarily preserve the extra NaN bits,
+ # so exclude NaNs.
+ a_ld = np.array(self.nonan_f16, dtype=np.longdouble)
+ b = np.array(a_ld, dtype=float16)
+ assert_equal(self.nonan_f16.view(dtype=uint16),
+ b.view(dtype=uint16))
+
+ # Check the range for which all integers can be represented
+ i_int = np.arange(-2048, 2049)
+ i_f16 = np.array(i_int, dtype=float16)
+ j = np.array(i_f16, dtype=int)
+ assert_equal(i_int, j)
+
+ @pytest.mark.parametrize("offset", [None, "up", "down"])
+ @pytest.mark.parametrize("shift", [None, "up", "down"])
+ @pytest.mark.parametrize("float_t", [np.float32, np.float64])
+ def test_half_conversion_rounding(self, float_t, shift, offset):
+ # Assumes that round to even is used during casting.
+ max_pattern = np.float16(np.finfo(np.float16).max).view(np.uint16)
+
+ # Test all (positive) finite numbers, denormals are most interesting
+ # however:
+ f16s_patterns = np.arange(0, max_pattern+1, dtype=np.uint16)
+ f16s_float = f16s_patterns.view(np.float16).astype(float_t)
+
+        # Shift the values by half a bit up or down (or do not shift):
+ if shift == "up":
+ f16s_float = 0.5 * (f16s_float[:-1] + f16s_float[1:])[1:]
+ elif shift == "down":
+ f16s_float = 0.5 * (f16s_float[:-1] + f16s_float[1:])[:-1]
+ else:
+ f16s_float = f16s_float[1:-1]
+
+ # Increase the float by a minimal value:
+ if offset == "up":
+ f16s_float = np.nextafter(f16s_float, float_t(1e50))
+ elif offset == "down":
+ f16s_float = np.nextafter(f16s_float, float_t(-1e50))
+
+ # Convert back to float16 and its bit pattern:
+ res_patterns = f16s_float.astype(np.float16).view(np.uint16)
+
+        # The calculation above tries the original values, or the exact
+        # midpoints between adjacent float16 values, and then offsets them
+        # by as little as possible.  If no offset occurs, "round to even"
+        # logic is needed; an arbitrarily small offset should always force
+        # normal up/down rounding.
+
+        # Calculate the expected pattern:
+ cmp_patterns = f16s_patterns[1:-1].copy()
+
+ if shift == "down" and offset != "up":
+ shift_pattern = -1
+ elif shift == "up" and offset != "down":
+ shift_pattern = 1
+ else:
+            # There cannot be a shift: either shift is None, so all rounding
+            # goes back to the original pattern, or the offset already undid
+            # the half-bit shift.
+ shift_pattern = 0
+
+ # If rounding occurs, is it normal rounding or round to even?
+ if offset is None:
+ # Round to even occurs, modify only non-even, cast to allow + (-1)
+ cmp_patterns[0::2].view(np.int16)[...] += shift_pattern
+ else:
+ cmp_patterns.view(np.int16)[...] += shift_pattern
+
+ assert_equal(res_patterns, cmp_patterns)
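+
+        # Editor's worked example (not part of the upstream file): "round to
+        # even" resolves an exact midpoint toward the pattern whose last
+        # mantissa bit is 0.  The float32 halfway between patterns 0x3c00
+        # (1.0) and 0x3c01 (1.0009765625) rounds *down* to 0x3c00, while the
+        # halfway point between 0x3c01 and 0x3c02 rounds *up* to 0x3c02 --
+        # both land on the even pattern.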
+
+ @pytest.mark.parametrize(["float_t", "uint_t", "bits"],
+ [(np.float32, np.uint32, 23),
+ (np.float64, np.uint64, 52)])
+ def test_half_conversion_denormal_round_even(self, float_t, uint_t, bits):
+        # Test specifically that all bits are considered when deciding
+        # whether round to even should occur (i.e. no bits are lost at the
+        # end).  Compare also gh-12721.  The most bits can be lost for the
+        # smallest denormal:
+ smallest_value = np.uint16(1).view(np.float16).astype(float_t)
+ assert smallest_value == 2**-24
+
+ # Will be rounded to zero based on round to even rule:
+ rounded_to_zero = smallest_value / float_t(2)
+ assert rounded_to_zero.astype(np.float16) == 0
+
+ # The significand will be all 0 for the float_t, test that we do not
+ # lose the lower ones of these:
+ for i in range(bits):
+ # slightly increasing the value should make it round up:
+ larger_pattern = rounded_to_zero.view(uint_t) | uint_t(1 << i)
+ larger_value = larger_pattern.view(float_t)
+ assert larger_value.astype(np.float16) == smallest_value
+
+ def test_nans_infs(self):
+ with np.errstate(all='ignore'):
+ # Check some of the ufuncs
+ assert_equal(np.isnan(self.all_f16), np.isnan(self.all_f32))
+ assert_equal(np.isinf(self.all_f16), np.isinf(self.all_f32))
+ assert_equal(np.isfinite(self.all_f16), np.isfinite(self.all_f32))
+ assert_equal(np.signbit(self.all_f16), np.signbit(self.all_f32))
+ assert_equal(np.spacing(float16(65504)), np.inf)
+
+ # Check comparisons of all values with NaN
+ nan = float16(np.nan)
+
+ assert_(not (self.all_f16 == nan).any())
+ assert_(not (nan == self.all_f16).any())
+
+ assert_((self.all_f16 != nan).all())
+ assert_((nan != self.all_f16).all())
+
+ assert_(not (self.all_f16 < nan).any())
+ assert_(not (nan < self.all_f16).any())
+
+ assert_(not (self.all_f16 <= nan).any())
+ assert_(not (nan <= self.all_f16).any())
+
+ assert_(not (self.all_f16 > nan).any())
+ assert_(not (nan > self.all_f16).any())
+
+ assert_(not (self.all_f16 >= nan).any())
+ assert_(not (nan >= self.all_f16).any())
+
+ def test_half_values(self):
+ """Confirms a small number of known half values"""
+ a = np.array([1.0, -1.0,
+ 2.0, -2.0,
+ 0.0999755859375, 0.333251953125, # 1/10, 1/3
+ 65504, -65504, # Maximum magnitude
+ 2.0**(-14), -2.0**(-14), # Minimum normal
+ 2.0**(-24), -2.0**(-24), # Minimum subnormal
+ 0, -1/1e1000, # Signed zeros
+ np.inf, -np.inf])
+ b = np.array([0x3c00, 0xbc00,
+ 0x4000, 0xc000,
+ 0x2e66, 0x3555,
+ 0x7bff, 0xfbff,
+ 0x0400, 0x8400,
+ 0x0001, 0x8001,
+ 0x0000, 0x8000,
+ 0x7c00, 0xfc00], dtype=uint16)
+ b.dtype = float16
+ assert_equal(a, b)
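+
+        # Editor's worked example (not part of the upstream file): decoding
+        # 0x2e66 from the table above by hand gives sign 0, exponent bits
+        # 0b01011 == 11 and mantissa 0x266 == 614, so
+        #     (1 + 614/1024) * 2.0**(11 - 15) == 0.0999755859375,
+        # the closest float16 to 1/10.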
+
+ def test_half_rounding(self):
+ """Checks that rounding when converting to half is correct"""
+ a = np.array([2.0**-25 + 2.0**-35, # Rounds to minimum subnormal
+ 2.0**-25, # Underflows to zero (nearest even mode)
+ 2.0**-26, # Underflows to zero
+ 1.0+2.0**-11 + 2.0**-16, # rounds to 1.0+2**(-10)
+ 1.0+2.0**-11, # rounds to 1.0 (nearest even mode)
+ 1.0+2.0**-12, # rounds to 1.0
+ 65519, # rounds to 65504
+ 65520], # rounds to inf
+ dtype=float64)
+ rounded = [2.0**-24,
+ 0.0,
+ 0.0,
+ 1.0+2.0**(-10),
+ 1.0,
+ 1.0,
+ 65504,
+ np.inf]
+
+ # Check float64->float16 rounding
+ b = np.array(a, dtype=float16)
+ assert_equal(b, rounded)
+
+ # Check float32->float16 rounding
+ a = np.array(a, dtype=float32)
+ b = np.array(a, dtype=float16)
+ assert_equal(b, rounded)
+
+ def test_half_correctness(self):
+ """Take every finite float16, and check the casting functions with
+ a manual conversion."""
+
+ # Create an array of all finite float16s
+ a_bits = self.finite_f16.view(dtype=uint16)
+
+ # Convert to 64-bit float manually
+ a_sgn = (-1.0)**((a_bits & 0x8000) >> 15)
+ a_exp = np.array((a_bits & 0x7c00) >> 10, dtype=np.int32) - 15
+ a_man = (a_bits & 0x03ff) * 2.0**(-10)
+ # Implicit bit of normalized floats
+ a_man[a_exp != -15] += 1
+ # Denormalized exponent is -14
+ a_exp[a_exp == -15] = -14
+
+ a_manual = a_sgn * a_man * 2.0**a_exp
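+        # (Editor's gloss) i.e. the IEEE half-precision decoding
+        #     value = (-1)**s * (1 + m/2**10) * 2**(e - 15)   for normals,
+        #     value = (-1)**s * (m/2**10) * 2**(-14)          for subnormals,
+        # which the two fix-ups above implement.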
+
+ a32_fail = np.nonzero(self.finite_f32 != a_manual)[0]
+ if len(a32_fail) != 0:
+ bad_index = a32_fail[0]
+ assert_equal(self.finite_f32, a_manual,
+ "First non-equal is half value %x -> %g != %g" %
+ (self.finite_f16[bad_index],
+ self.finite_f32[bad_index],
+ a_manual[bad_index]))
+
+ a64_fail = np.nonzero(self.finite_f64 != a_manual)[0]
+ if len(a64_fail) != 0:
+ bad_index = a64_fail[0]
+ assert_equal(self.finite_f64, a_manual,
+ "First non-equal is half value %x -> %g != %g" %
+ (self.finite_f16[bad_index],
+ self.finite_f64[bad_index],
+ a_manual[bad_index]))
+
+ def test_half_ordering(self):
+ """Make sure comparisons are working right"""
+
+ # All non-NaN float16 values in reverse order
+ a = self.nonan_f16[::-1].copy()
+
+ # 32-bit float copy
+ b = np.array(a, dtype=float32)
+
+ # Should sort the same
+ a.sort()
+ b.sort()
+ assert_equal(a, b)
+
+ # Comparisons should work
+ assert_((a[:-1] <= a[1:]).all())
+ assert_(not (a[:-1] > a[1:]).any())
+ assert_((a[1:] >= a[:-1]).all())
+ assert_(not (a[1:] < a[:-1]).any())
+ # All != except for +/-0
+ assert_equal(np.nonzero(a[:-1] < a[1:])[0].size, a.size-2)
+ assert_equal(np.nonzero(a[1:] > a[:-1])[0].size, a.size-2)
+
+ def test_half_funcs(self):
+ """Test the various ArrFuncs"""
+
+ # fill
+ assert_equal(np.arange(10, dtype=float16),
+ np.arange(10, dtype=float32))
+
+ # fillwithscalar
+ a = np.zeros((5,), dtype=float16)
+ a.fill(1)
+ assert_equal(a, np.ones((5,), dtype=float16))
+
+ # nonzero and copyswap
+ a = np.array([0, 0, -1, -1/1e20, 0, 2.0**-24, 7.629e-6], dtype=float16)
+ assert_equal(a.nonzero()[0],
+ [2, 5, 6])
+ a = a.byteswap().newbyteorder()
+ assert_equal(a.nonzero()[0],
+ [2, 5, 6])
+
+ # dot
+ a = np.arange(0, 10, 0.5, dtype=float16)
+ b = np.ones((20,), dtype=float16)
+ assert_equal(np.dot(a, b),
+ 95)
+
+ # argmax
+ a = np.array([0, -np.inf, -2, 0.5, 12.55, 7.3, 2.1, 12.4], dtype=float16)
+ assert_equal(a.argmax(),
+ 4)
+ a = np.array([0, -np.inf, -2, np.inf, 12.55, np.nan, 2.1, 12.4], dtype=float16)
+ assert_equal(a.argmax(),
+ 5)
+
+ # getitem
+ a = np.arange(10, dtype=float16)
+ for i in range(10):
+ assert_equal(a.item(i), i)
+
+ def test_spacing_nextafter(self):
+ """Test np.spacing and np.nextafter"""
+ # All non-negative finite #'s
+ a = np.arange(0x7c00, dtype=uint16)
+ hinf = np.array((np.inf,), dtype=float16)
+ a_f16 = a.view(dtype=float16)
+
+ assert_equal(np.spacing(a_f16[:-1]), a_f16[1:]-a_f16[:-1])
+
+ assert_equal(np.nextafter(a_f16[:-1], hinf), a_f16[1:])
+ assert_equal(np.nextafter(a_f16[0], -hinf), -a_f16[1])
+ assert_equal(np.nextafter(a_f16[1:], -hinf), a_f16[:-1])
+
+ # switch to negatives
+ a |= 0x8000
+
+ assert_equal(np.spacing(a_f16[0]), np.spacing(a_f16[1]))
+ assert_equal(np.spacing(a_f16[1:]), a_f16[:-1]-a_f16[1:])
+
+ assert_equal(np.nextafter(a_f16[0], hinf), -a_f16[1])
+ assert_equal(np.nextafter(a_f16[1:], hinf), a_f16[:-1])
+ assert_equal(np.nextafter(a_f16[:-1], -hinf), a_f16[1:])
+
+ def test_half_ufuncs(self):
+ """Test the various ufuncs"""
+
+ a = np.array([0, 1, 2, 4, 2], dtype=float16)
+ b = np.array([-2, 5, 1, 4, 3], dtype=float16)
+ c = np.array([0, -1, -np.inf, np.nan, 6], dtype=float16)
+
+ assert_equal(np.add(a, b), [-2, 6, 3, 8, 5])
+ assert_equal(np.subtract(a, b), [2, -4, 1, 0, -1])
+ assert_equal(np.multiply(a, b), [0, 5, 2, 16, 6])
+ assert_equal(np.divide(a, b), [0, 0.199951171875, 2, 1, 0.66650390625])
+
+ assert_equal(np.equal(a, b), [False, False, False, True, False])
+ assert_equal(np.not_equal(a, b), [True, True, True, False, True])
+ assert_equal(np.less(a, b), [False, True, False, False, True])
+ assert_equal(np.less_equal(a, b), [False, True, False, True, True])
+ assert_equal(np.greater(a, b), [True, False, True, False, False])
+ assert_equal(np.greater_equal(a, b), [True, False, True, True, False])
+ assert_equal(np.logical_and(a, b), [False, True, True, True, True])
+ assert_equal(np.logical_or(a, b), [True, True, True, True, True])
+ assert_equal(np.logical_xor(a, b), [True, False, False, False, False])
+ assert_equal(np.logical_not(a), [True, False, False, False, False])
+
+ assert_equal(np.isnan(c), [False, False, False, True, False])
+ assert_equal(np.isinf(c), [False, False, True, False, False])
+ assert_equal(np.isfinite(c), [True, True, False, False, True])
+ assert_equal(np.signbit(b), [True, False, False, False, False])
+
+ assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3])
+
+ assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3])
+
+ x = np.maximum(b, c)
+ assert_(np.isnan(x[3]))
+ x[3] = 0
+ assert_equal(x, [0, 5, 1, 0, 6])
+
+ assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2])
+
+ x = np.minimum(b, c)
+ assert_(np.isnan(x[3]))
+ x[3] = 0
+ assert_equal(x, [-2, -1, -np.inf, 0, 3])
+
+ assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3])
+ assert_equal(np.fmax(b, c), [0, 5, 1, 4, 6])
+ assert_equal(np.fmin(a, b), [-2, 1, 1, 4, 2])
+ assert_equal(np.fmin(b, c), [-2, -1, -np.inf, 4, 3])
+
+ assert_equal(np.floor_divide(a, b), [0, 0, 2, 1, 0])
+ assert_equal(np.remainder(a, b), [0, 1, 0, 0, 2])
+ assert_equal(np.divmod(a, b), ([0, 0, 2, 1, 0], [0, 1, 0, 0, 2]))
+ assert_equal(np.square(b), [4, 25, 1, 16, 9])
+ assert_equal(np.reciprocal(b), [-0.5, 0.199951171875, 1, 0.25, 0.333251953125])
+ assert_equal(np.ones_like(b), [1, 1, 1, 1, 1])
+ assert_equal(np.conjugate(b), b)
+ assert_equal(np.absolute(b), [2, 5, 1, 4, 3])
+ assert_equal(np.negative(b), [2, -5, -1, -4, -3])
+ assert_equal(np.positive(b), b)
+ assert_equal(np.sign(b), [-1, 1, 1, 1, 1])
+ assert_equal(np.modf(b), ([0, 0, 0, 0, 0], b))
+ assert_equal(np.frexp(b), ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2]))
+ assert_equal(np.ldexp(b, [0, 1, 2, 4, 2]), [-2, 10, 4, 64, 12])
+
+ def test_half_coercion(self):
+ """Test that half gets coerced properly with the other types"""
+ a16 = np.array((1,), dtype=float16)
+ a32 = np.array((1,), dtype=float32)
+ b16 = float16(1)
+ b32 = float32(1)
+
+ assert_equal(np.power(a16, 2).dtype, float16)
+ assert_equal(np.power(a16, 2.0).dtype, float16)
+ assert_equal(np.power(a16, b16).dtype, float16)
+ assert_equal(np.power(a16, b32).dtype, float16)
+ assert_equal(np.power(a16, a16).dtype, float16)
+ assert_equal(np.power(a16, a32).dtype, float32)
+
+ assert_equal(np.power(b16, 2).dtype, float64)
+ assert_equal(np.power(b16, 2.0).dtype, float64)
+ assert_equal(np.power(b16, b16).dtype, float16)
+ assert_equal(np.power(b16, b32).dtype, float32)
+ assert_equal(np.power(b16, a16).dtype, float16)
+ assert_equal(np.power(b16, a32).dtype, float32)
+
+ assert_equal(np.power(a32, a16).dtype, float32)
+ assert_equal(np.power(a32, b16).dtype, float32)
+ assert_equal(np.power(b32, a16).dtype, float16)
+ assert_equal(np.power(b32, b16).dtype, float32)
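+
+        # Editor's gloss (describing only what the asserts above show): a
+        # float16 *array* keeps float16 against Python numbers and NumPy
+        # scalars, while a float16 *scalar* paired with Python numbers
+        # promotes to float64 -- NumPy's legacy value-based casting treats
+        # scalars and arrays differently.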
+
+ @pytest.mark.skipif(platform.machine() == "armv5tel",
+ reason="See gh-413.")
+ def test_half_fpe(self):
+ with np.errstate(all='raise'):
+ sx16 = np.array((1e-4,), dtype=float16)
+ bx16 = np.array((1e4,), dtype=float16)
+ sy16 = float16(1e-4)
+ by16 = float16(1e4)
+
+ # Underflow errors
+ assert_raises_fpe('underflow', lambda a, b:a*b, sx16, sx16)
+ assert_raises_fpe('underflow', lambda a, b:a*b, sx16, sy16)
+ assert_raises_fpe('underflow', lambda a, b:a*b, sy16, sx16)
+ assert_raises_fpe('underflow', lambda a, b:a*b, sy16, sy16)
+ assert_raises_fpe('underflow', lambda a, b:a/b, sx16, bx16)
+ assert_raises_fpe('underflow', lambda a, b:a/b, sx16, by16)
+ assert_raises_fpe('underflow', lambda a, b:a/b, sy16, bx16)
+ assert_raises_fpe('underflow', lambda a, b:a/b, sy16, by16)
+ assert_raises_fpe('underflow', lambda a, b:a/b,
+ float16(2.**-14), float16(2**11))
+ assert_raises_fpe('underflow', lambda a, b:a/b,
+ float16(-2.**-14), float16(2**11))
+ assert_raises_fpe('underflow', lambda a, b:a/b,
+ float16(2.**-14+2**-24), float16(2))
+ assert_raises_fpe('underflow', lambda a, b:a/b,
+ float16(-2.**-14-2**-24), float16(2))
+ assert_raises_fpe('underflow', lambda a, b:a/b,
+ float16(2.**-14+2**-23), float16(4))
+
+ # Overflow errors
+ assert_raises_fpe('overflow', lambda a, b:a*b, bx16, bx16)
+ assert_raises_fpe('overflow', lambda a, b:a*b, bx16, by16)
+ assert_raises_fpe('overflow', lambda a, b:a*b, by16, bx16)
+ assert_raises_fpe('overflow', lambda a, b:a*b, by16, by16)
+ assert_raises_fpe('overflow', lambda a, b:a/b, bx16, sx16)
+ assert_raises_fpe('overflow', lambda a, b:a/b, bx16, sy16)
+ assert_raises_fpe('overflow', lambda a, b:a/b, by16, sx16)
+ assert_raises_fpe('overflow', lambda a, b:a/b, by16, sy16)
+ assert_raises_fpe('overflow', lambda a, b:a+b,
+ float16(65504), float16(17))
+ assert_raises_fpe('overflow', lambda a, b:a-b,
+ float16(-65504), float16(17))
+ assert_raises_fpe('overflow', np.nextafter, float16(65504), float16(np.inf))
+ assert_raises_fpe('overflow', np.nextafter, float16(-65504), float16(-np.inf))
+ assert_raises_fpe('overflow', np.spacing, float16(65504))
+
+ # Invalid value errors
+ assert_raises_fpe('invalid', np.divide, float16(np.inf), float16(np.inf))
+ assert_raises_fpe('invalid', np.spacing, float16(np.inf))
+ assert_raises_fpe('invalid', np.spacing, float16(np.nan))
+ assert_raises_fpe('invalid', np.nextafter, float16(np.inf), float16(0))
+ assert_raises_fpe('invalid', np.nextafter, float16(-np.inf), float16(0))
+ assert_raises_fpe('invalid', np.nextafter, float16(0), float16(np.nan))
+
+ # These should not raise
+ float16(65472)+float16(32)
+ float16(2**-13)/float16(2)
+ float16(2**-14)/float16(2**10)
+ np.spacing(float16(-65504))
+ np.nextafter(float16(65504), float16(-np.inf))
+ np.nextafter(float16(-65504), float16(np.inf))
+ float16(2**-14)/float16(2**10)
+ float16(-2**-14)/float16(2**10)
+ float16(2**-14+2**-23)/float16(2)
+ float16(-2**-14-2**-23)/float16(2)
+
+ def test_half_array_interface(self):
+ """Test that half is compatible with __array_interface__"""
+ class Dummy:
+ pass
+
+ a = np.ones((1,), dtype=float16)
+ b = Dummy()
+ b.__array_interface__ = a.__array_interface__
+ c = np.array(b)
+ assert_(c.dtype == float16)
+ assert_equal(a, c)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_half.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_half.pyc
new file mode 100644
index 0000000..700c4f3
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_half.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_indexerrors.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_indexerrors.py
new file mode 100644
index 0000000..63b43c4
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_indexerrors.py
@@ -0,0 +1,123 @@
+from __future__ import division, absolute_import, print_function
+
+import numpy as np
+from numpy.testing import assert_raises
+
+class TestIndexErrors(object):
+    '''Tests exercising IndexError paths not covered by other tests.'''
+
+ def test_arraytypes_fasttake(self):
+ 'take from a 0-length dimension'
+ x = np.empty((2, 3, 0, 4))
+ assert_raises(IndexError, x.take, [0], axis=2)
+ assert_raises(IndexError, x.take, [1], axis=2)
+ assert_raises(IndexError, x.take, [0], axis=2, mode='wrap')
+ assert_raises(IndexError, x.take, [0], axis=2, mode='clip')
+
+ def test_take_from_object(self):
+ # Check exception taking from object array
+ d = np.zeros(5, dtype=object)
+ assert_raises(IndexError, d.take, [6])
+
+ # Check exception taking from 0-d array
+ d = np.zeros((5, 0), dtype=object)
+ assert_raises(IndexError, d.take, [1], axis=1)
+ assert_raises(IndexError, d.take, [0], axis=1)
+ assert_raises(IndexError, d.take, [0])
+ assert_raises(IndexError, d.take, [0], mode='wrap')
+ assert_raises(IndexError, d.take, [0], mode='clip')
+
+ def test_multiindex_exceptions(self):
+ a = np.empty(5, dtype=object)
+ assert_raises(IndexError, a.item, 20)
+ a = np.empty((5, 0), dtype=object)
+ assert_raises(IndexError, a.item, (0, 0))
+
+ a = np.empty(5, dtype=object)
+ assert_raises(IndexError, a.itemset, 20, 0)
+ a = np.empty((5, 0), dtype=object)
+ assert_raises(IndexError, a.itemset, (0, 0), 0)
+
+ def test_put_exceptions(self):
+ a = np.zeros((5, 5))
+ assert_raises(IndexError, a.put, 100, 0)
+ a = np.zeros((5, 5), dtype=object)
+ assert_raises(IndexError, a.put, 100, 0)
+ a = np.zeros((5, 5, 0))
+ assert_raises(IndexError, a.put, 100, 0)
+ a = np.zeros((5, 5, 0), dtype=object)
+ assert_raises(IndexError, a.put, 100, 0)
+
+ def test_iterators_exceptions(self):
+ "cases in iterators.c"
+ def assign(obj, ind, val):
+ obj[ind] = val
+
+ a = np.zeros([1, 2, 3])
+ assert_raises(IndexError, lambda: a[0, 5, None, 2])
+ assert_raises(IndexError, lambda: a[0, 5, 0, 2])
+ assert_raises(IndexError, lambda: assign(a, (0, 5, None, 2), 1))
+ assert_raises(IndexError, lambda: assign(a, (0, 5, 0, 2), 1))
+
+ a = np.zeros([1, 0, 3])
+ assert_raises(IndexError, lambda: a[0, 0, None, 2])
+ assert_raises(IndexError, lambda: assign(a, (0, 0, None, 2), 1))
+
+ a = np.zeros([1, 2, 3])
+ assert_raises(IndexError, lambda: a.flat[10])
+ assert_raises(IndexError, lambda: assign(a.flat, 10, 5))
+ a = np.zeros([1, 0, 3])
+ assert_raises(IndexError, lambda: a.flat[10])
+ assert_raises(IndexError, lambda: assign(a.flat, 10, 5))
+
+ a = np.zeros([1, 2, 3])
+ assert_raises(IndexError, lambda: a.flat[np.array(10)])
+ assert_raises(IndexError, lambda: assign(a.flat, np.array(10), 5))
+ a = np.zeros([1, 0, 3])
+ assert_raises(IndexError, lambda: a.flat[np.array(10)])
+ assert_raises(IndexError, lambda: assign(a.flat, np.array(10), 5))
+
+ a = np.zeros([1, 2, 3])
+ assert_raises(IndexError, lambda: a.flat[np.array([10])])
+ assert_raises(IndexError, lambda: assign(a.flat, np.array([10]), 5))
+ a = np.zeros([1, 0, 3])
+ assert_raises(IndexError, lambda: a.flat[np.array([10])])
+ assert_raises(IndexError, lambda: assign(a.flat, np.array([10]), 5))
+
+ def test_mapping(self):
+ "cases from mapping.c"
+
+ def assign(obj, ind, val):
+ obj[ind] = val
+
+ a = np.zeros((0, 10))
+ assert_raises(IndexError, lambda: a[12])
+
+ a = np.zeros((3, 5))
+ assert_raises(IndexError, lambda: a[(10, 20)])
+ assert_raises(IndexError, lambda: assign(a, (10, 20), 1))
+ a = np.zeros((3, 0))
+ assert_raises(IndexError, lambda: a[(1, 0)])
+ assert_raises(IndexError, lambda: assign(a, (1, 0), 1))
+
+ a = np.zeros((10,))
+ assert_raises(IndexError, lambda: assign(a, 10, 1))
+ a = np.zeros((0,))
+ assert_raises(IndexError, lambda: assign(a, 10, 1))
+
+ a = np.zeros((3, 5))
+ assert_raises(IndexError, lambda: a[(1, [1, 20])])
+ assert_raises(IndexError, lambda: assign(a, (1, [1, 20]), 1))
+ a = np.zeros((3, 0))
+ assert_raises(IndexError, lambda: a[(1, [0, 1])])
+ assert_raises(IndexError, lambda: assign(a, (1, [0, 1]), 1))
+
+ def test_methods(self):
+ "cases from methods.c"
+
+ a = np.zeros((3, 3))
+ assert_raises(IndexError, lambda: a.item(100))
+ assert_raises(IndexError, lambda: a.itemset(100, 1))
+ a = np.zeros((0, 3))
+ assert_raises(IndexError, lambda: a.item(100))
+ assert_raises(IndexError, lambda: a.itemset(100, 1))
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_indexerrors.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_indexerrors.pyc
new file mode 100644
index 0000000..4ad649a
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_indexerrors.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_indexing.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_indexing.py
new file mode 100644
index 0000000..99792ce
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_indexing.py
@@ -0,0 +1,1325 @@
+from __future__ import division, absolute_import, print_function
+
+import sys
+import warnings
+import functools
+import operator
+import pytest
+
+import numpy as np
+from numpy.core._multiarray_tests import array_indexing
+from itertools import product
+from numpy.testing import (
+ assert_, assert_equal, assert_raises, assert_array_equal, assert_warns,
+ HAS_REFCOUNT, suppress_warnings,
+ )
+
+
+class TestIndexing(object):
+ def test_index_no_floats(self):
+ a = np.array([[[5]]])
+
+ assert_raises(IndexError, lambda: a[0.0])
+ assert_raises(IndexError, lambda: a[0, 0.0])
+ assert_raises(IndexError, lambda: a[0.0, 0])
+ assert_raises(IndexError, lambda: a[0.0,:])
+ assert_raises(IndexError, lambda: a[:, 0.0])
+ assert_raises(IndexError, lambda: a[:, 0.0,:])
+ assert_raises(IndexError, lambda: a[0.0,:,:])
+ assert_raises(IndexError, lambda: a[0, 0, 0.0])
+ assert_raises(IndexError, lambda: a[0.0, 0, 0])
+ assert_raises(IndexError, lambda: a[0, 0.0, 0])
+ assert_raises(IndexError, lambda: a[-1.4])
+ assert_raises(IndexError, lambda: a[0, -1.4])
+ assert_raises(IndexError, lambda: a[-1.4, 0])
+ assert_raises(IndexError, lambda: a[-1.4,:])
+ assert_raises(IndexError, lambda: a[:, -1.4])
+ assert_raises(IndexError, lambda: a[:, -1.4,:])
+ assert_raises(IndexError, lambda: a[-1.4,:,:])
+ assert_raises(IndexError, lambda: a[0, 0, -1.4])
+ assert_raises(IndexError, lambda: a[-1.4, 0, 0])
+ assert_raises(IndexError, lambda: a[0, -1.4, 0])
+ assert_raises(IndexError, lambda: a[0.0:, 0.0])
+ assert_raises(IndexError, lambda: a[0.0:, 0.0,:])
+
+ def test_slicing_no_floats(self):
+ a = np.array([[5]])
+
+ # start as float.
+ assert_raises(TypeError, lambda: a[0.0:])
+ assert_raises(TypeError, lambda: a[0:, 0.0:2])
+ assert_raises(TypeError, lambda: a[0.0::2, :0])
+ assert_raises(TypeError, lambda: a[0.0:1:2,:])
+ assert_raises(TypeError, lambda: a[:, 0.0:])
+ # stop as float.
+ assert_raises(TypeError, lambda: a[:0.0])
+ assert_raises(TypeError, lambda: a[:0, 1:2.0])
+ assert_raises(TypeError, lambda: a[:0.0:2, :0])
+ assert_raises(TypeError, lambda: a[:0.0,:])
+ assert_raises(TypeError, lambda: a[:, 0:4.0:2])
+ # step as float.
+ assert_raises(TypeError, lambda: a[::1.0])
+ assert_raises(TypeError, lambda: a[0:, :2:2.0])
+ assert_raises(TypeError, lambda: a[1::4.0, :0])
+ assert_raises(TypeError, lambda: a[::5.0,:])
+ assert_raises(TypeError, lambda: a[:, 0:4:2.0])
+ # mixed.
+ assert_raises(TypeError, lambda: a[1.0:2:2.0])
+ assert_raises(TypeError, lambda: a[1.0::2.0])
+ assert_raises(TypeError, lambda: a[0:, :2.0:2.0])
+ assert_raises(TypeError, lambda: a[1.0:1:4.0, :0])
+ assert_raises(TypeError, lambda: a[1.0:5.0:5.0,:])
+ assert_raises(TypeError, lambda: a[:, 0.4:4.0:2.0])
+ # should still get the DeprecationWarning if step = 0.
+ assert_raises(TypeError, lambda: a[::0.0])
+
+ def test_index_no_array_to_index(self):
+ # No non-scalar arrays.
+ a = np.array([[[1]]])
+
+ assert_raises(TypeError, lambda: a[a:a:a])
+
+ def test_none_index(self):
+ # `None` index adds newaxis
+ a = np.array([1, 2, 3])
+ assert_equal(a[None], a[np.newaxis])
+ assert_equal(a[None].ndim, a.ndim + 1)
+
+ def test_empty_tuple_index(self):
+ # Empty tuple index creates a view
+ a = np.array([1, 2, 3])
+ assert_equal(a[()], a)
+ assert_(a[()].base is a)
+ a = np.array(0)
+ assert_(isinstance(a[()], np.int_))
+
+ def test_void_scalar_empty_tuple(self):
+ s = np.zeros((), dtype='V4')
+ assert_equal(s[()].dtype, s.dtype)
+ assert_equal(s[()], s)
+ assert_equal(type(s[...]), np.ndarray)
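+
+        # Editor's gloss: the distinction exercised here is that ``s[()]``
+        # extracts the element of a 0-d array, while ``s[...]`` always
+        # returns an ndarray view -- compare test_empty_tuple_index above,
+        # where ``np.array(0)[()]`` comes back as an ``np.int_`` scalar.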
+
+ def test_same_kind_index_casting(self):
+ # Indexes should be cast with same-kind and not safe, even if that
+ # is somewhat unsafe. So test various different code paths.
+ index = np.arange(5)
+ u_index = index.astype(np.uintp)
+ arr = np.arange(10)
+
+ assert_array_equal(arr[index], arr[u_index])
+ arr[u_index] = np.arange(5)
+ assert_array_equal(arr, np.arange(10))
+
+ arr = np.arange(10).reshape(5, 2)
+ assert_array_equal(arr[index], arr[u_index])
+
+ arr[u_index] = np.arange(5)[:,None]
+ assert_array_equal(arr, np.arange(5)[:,None].repeat(2, axis=1))
+
+ arr = np.arange(25).reshape(5, 5)
+ assert_array_equal(arr[u_index, u_index], arr[index, index])
+
+ def test_empty_fancy_index(self):
+ # Empty list index creates an empty array
+ # with the same dtype (but with weird shape)
+ a = np.array([1, 2, 3])
+ assert_equal(a[[]], [])
+ assert_equal(a[[]].dtype, a.dtype)
+
+ b = np.array([], dtype=np.intp)
+ assert_equal(a[[]], [])
+ assert_equal(a[[]].dtype, a.dtype)
+
+ b = np.array([])
+ assert_raises(IndexError, a.__getitem__, b)
+
+ def test_ellipsis_index(self):
+ a = np.array([[1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9]])
+ assert_(a[...] is not a)
+ assert_equal(a[...], a)
+ # `a[...]` was `a` in numpy <1.9.
+ assert_(a[...].base is a)
+
+ # Slicing with ellipsis can skip an
+ # arbitrary number of dimensions
+ assert_equal(a[0, ...], a[0])
+ assert_equal(a[0, ...], a[0,:])
+ assert_equal(a[..., 0], a[:, 0])
+
+ # Slicing with ellipsis always results
+ # in an array, not a scalar
+ assert_equal(a[0, ..., 1], np.array(2))
+
+ # Assignment with `(Ellipsis,)` on 0-d arrays
+ b = np.array(1)
+ b[(Ellipsis,)] = 2
+ assert_equal(b, 2)
+
+ def test_single_int_index(self):
+ # Single integer index selects one row
+ a = np.array([[1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9]])
+
+ assert_equal(a[0], [1, 2, 3])
+ assert_equal(a[-1], [7, 8, 9])
+
+ # Index out of bounds produces IndexError
+ assert_raises(IndexError, a.__getitem__, 1 << 30)
+ # Index overflow produces IndexError
+ assert_raises(IndexError, a.__getitem__, 1 << 64)
+
+ def test_single_bool_index(self):
+ # Single boolean index
+ a = np.array([[1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9]])
+
+ assert_equal(a[np.array(True)], a[None])
+ assert_equal(a[np.array(False)], a[None][0:0])
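+
+        # Editor's gloss: a 0-d boolean index acts like ``None`` plus a
+        # filter -- ``a[np.array(True)]`` equals ``a[None]`` (a new leading
+        # axis of length 1) and ``a[np.array(False)]`` keeps the same shape
+        # with that leading axis empty, exactly as asserted above.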
+
+ def test_boolean_shape_mismatch(self):
+ arr = np.ones((5, 4, 3))
+
+ index = np.array([True])
+ assert_raises(IndexError, arr.__getitem__, index)
+
+ index = np.array([False] * 6)
+ assert_raises(IndexError, arr.__getitem__, index)
+
+ index = np.zeros((4, 4), dtype=bool)
+ assert_raises(IndexError, arr.__getitem__, index)
+
+ assert_raises(IndexError, arr.__getitem__, (slice(None), index))
+
+ def test_boolean_indexing_onedim(self):
+ # Indexing a 2-dimensional array with
+ # boolean array of length one
+ a = np.array([[ 0., 0., 0.]])
+ b = np.array([ True], dtype=bool)
+ assert_equal(a[b], a)
+ # boolean assignment
+ a[b] = 1.
+ assert_equal(a, [[1., 1., 1.]])
+
+ def test_boolean_assignment_value_mismatch(self):
+ # A boolean assignment should fail when the shape of the values
+ # cannot be broadcast to the subscription. (see also gh-3458)
+ a = np.arange(4)
+
+ def f(a, v):
+ a[a > -1] = v
+
+ assert_raises(ValueError, f, a, [])
+ assert_raises(ValueError, f, a, [1, 2, 3])
+ assert_raises(ValueError, f, a[:1], [1, 2, 3])
+
+ def test_boolean_assignment_needs_api(self):
+ # See also gh-7666
+ # This caused a segfault on Python 2 due to the GIL not being
+ # held when the iterator does not need it, but the transfer function
+ # does
+ arr = np.zeros(1000)
+ indx = np.zeros(1000, dtype=bool)
+ indx[:100] = True
+ arr[indx] = np.ones(100, dtype=object)
+
+ expected = np.zeros(1000)
+ expected[:100] = 1
+ assert_array_equal(arr, expected)
+
+ def test_boolean_indexing_twodim(self):
+ # Indexing a 2-dimensional array with
+ # 2-dimensional boolean array
+ a = np.array([[1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9]])
+ b = np.array([[ True, False, True],
+ [False, True, False],
+ [ True, False, True]])
+ assert_equal(a[b], [1, 3, 5, 7, 9])
+ assert_equal(a[b[1]], [[4, 5, 6]])
+ assert_equal(a[b[0]], a[b[2]])
+
+ # boolean assignment
+ a[b] = 0
+ assert_equal(a, [[0, 2, 0],
+ [4, 0, 6],
+ [0, 8, 0]])
+
+ def test_reverse_strides_and_subspace_bufferinit(self):
+ # This tests that the strides are not reversed for simple and
+ # subspace fancy indexing.
+ a = np.ones(5)
+ b = np.zeros(5, dtype=np.intp)[::-1]
+ c = np.arange(5)[::-1]
+
+ a[b] = c
+ # If the strides are not reversed, the 0 in the arange comes last.
+ assert_equal(a[0], 0)
+
+ # This also tests that the subspace buffer is initialized:
+ a = np.ones((5, 2))
+ c = np.arange(10).reshape(5, 2)[::-1]
+ a[b, :] = c
+ assert_equal(a[0], [0, 1])
+
+ def test_reversed_strides_result_allocation(self):
+ # Test a bug when calculating the output strides for a result array
+ # when the subspace size was 1 (and test other cases as well)
+ a = np.arange(10)[:, None]
+ i = np.arange(10)[::-1]
+ assert_array_equal(a[i], a[i.copy('C')])
+
+ a = np.arange(20).reshape(-1, 2)
+
+ def test_uncontiguous_subspace_assignment(self):
+ # During development there was a bug activating a skip logic
+ # based on ndim instead of size.
+ a = np.full((3, 4, 2), -1)
+ b = np.full((3, 4, 2), -1)
+
+ a[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T
+ b[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T.copy()
+
+ assert_equal(a, b)
+
+ def test_too_many_fancy_indices_special_case(self):
+ # Just documents behaviour, this is a small limitation.
+ a = np.ones((1,) * 32) # 32 is NPY_MAXDIMS
+ assert_raises(IndexError, a.__getitem__, (np.array([0]),) * 32)
+
+ def test_scalar_array_bool(self):
+ # NumPy bools can be used as boolean index (python ones as of yet not)
+ a = np.array(1)
+ assert_equal(a[np.bool_(True)], a[np.array(True)])
+ assert_equal(a[np.bool_(False)], a[np.array(False)])
+
+ # After deprecating bools as integers:
+ #a = np.array([0,1,2])
+ #assert_equal(a[True, :], a[None, :])
+ #assert_equal(a[:, True], a[:, None])
+ #
+ #assert_(not np.may_share_memory(a, a[True, :]))
+
+ def test_everything_returns_views(self):
+ # Before `...` would return a itself.
+ a = np.arange(5)
+
+ assert_(a is not a[()])
+ assert_(a is not a[...])
+ assert_(a is not a[:])
+
+ def test_broaderrors_indexing(self):
+ a = np.zeros((5, 5))
+ assert_raises(IndexError, a.__getitem__, ([0, 1], [0, 1, 2]))
+ assert_raises(IndexError, a.__setitem__, ([0, 1], [0, 1, 2]), 0)
+
+ def test_trivial_fancy_out_of_bounds(self):
+ a = np.zeros(5)
+ ind = np.ones(20, dtype=np.intp)
+ ind[-1] = 10
+ assert_raises(IndexError, a.__getitem__, ind)
+ assert_raises(IndexError, a.__setitem__, ind, 0)
+ ind = np.ones(20, dtype=np.intp)
+ ind[0] = 11
+ assert_raises(IndexError, a.__getitem__, ind)
+ assert_raises(IndexError, a.__setitem__, ind, 0)
+
+ def test_trivial_fancy_not_possible(self):
+ # Test that the fast path for trivial assignment is not incorrectly
+ # used when the index is not contiguous or 1D, see also gh-11467.
+ a = np.arange(6)
+ idx = np.arange(6, dtype=np.intp).reshape(2, 1, 3)[:, :, 0]
+ assert_array_equal(a[idx], idx)
+
+        # this case must not go into the fast path; note that idx is
+        # a non-contiguous, non-1D array here.
+ a[idx] = -1
+ res = np.arange(6)
+ res[0] = -1
+ res[3] = -1
+ assert_array_equal(a, res)
+
+ def test_nonbaseclass_values(self):
+ class SubClass(np.ndarray):
+ def __array_finalize__(self, old):
+ # Have array finalize do funny things
+ self.fill(99)
+
+ a = np.zeros((5, 5))
+ s = a.copy().view(type=SubClass)
+ s.fill(1)
+
+ a[[0, 1, 2, 3, 4], :] = s
+ assert_((a == 1).all())
+
+ # Subspace is last, so transposing might want to finalize
+ a[:, [0, 1, 2, 3, 4]] = s
+ assert_((a == 1).all())
+
+ a.fill(0)
+ a[...] = s
+ assert_((a == 1).all())
+
+ def test_subclass_writeable(self):
+ d = np.rec.array([('NGC1001', 11), ('NGC1002', 1.), ('NGC1003', 1.)],
+ dtype=[('target', 'S20'), ('V_mag', '>f4')])
+ ind = np.array([False, True, True], dtype=bool)
+ assert_(d[ind].flags.writeable)
+ ind = np.array([0, 1])
+ assert_(d[ind].flags.writeable)
+ assert_(d[...].flags.writeable)
+ assert_(d[0].flags.writeable)
+
+ def test_memory_order(self):
+ # This is not necessary to preserve. Memory layouts for
+ # more complex indices are not as simple.
+ a = np.arange(10)
+ b = np.arange(10).reshape(5,2).T
+ assert_(a[b].flags.f_contiguous)
+
+ # Takes a different implementation branch:
+ a = a.reshape(-1, 1)
+ assert_(a[b, 0].flags.f_contiguous)
+
+ def test_scalar_return_type(self):
+ # Full scalar indices should return scalars and object
+ # arrays should not call PyArray_Return on their items
+ class Zero(object):
+ # The most basic valid indexing
+ def __index__(self):
+ return 0
+
+ z = Zero()
+
+ class ArrayLike(object):
+ # Simple array, should behave like the array
+ def __array__(self):
+ return np.array(0)
+
+ a = np.zeros(())
+ assert_(isinstance(a[()], np.float_))
+ a = np.zeros(1)
+ assert_(isinstance(a[z], np.float_))
+ a = np.zeros((1, 1))
+ assert_(isinstance(a[z, np.array(0)], np.float_))
+ assert_(isinstance(a[z, ArrayLike()], np.float_))
+
+ # And object arrays do not call it too often:
+ b = np.array(0)
+ a = np.array(0, dtype=object)
+ a[()] = b
+ assert_(isinstance(a[()], np.ndarray))
+ a = np.array([b, None])
+ assert_(isinstance(a[z], np.ndarray))
+ a = np.array([[b, None]])
+ assert_(isinstance(a[z, np.array(0)], np.ndarray))
+ assert_(isinstance(a[z, ArrayLike()], np.ndarray))
+
+ def test_small_regressions(self):
+ # Reference count of intp for index checks
+ a = np.array([0])
+ if HAS_REFCOUNT:
+ refcount = sys.getrefcount(np.dtype(np.intp))
+ # item setting always checks indices in separate function:
+ a[np.array([0], dtype=np.intp)] = 1
+ a[np.array([0], dtype=np.uint8)] = 1
+ assert_raises(IndexError, a.__setitem__,
+ np.array([1], dtype=np.intp), 1)
+ assert_raises(IndexError, a.__setitem__,
+ np.array([1], dtype=np.uint8), 1)
+
+ if HAS_REFCOUNT:
+ assert_equal(sys.getrefcount(np.dtype(np.intp)), refcount)
+
+ def test_unaligned(self):
+ v = (np.zeros(64, dtype=np.int8) + ord('a'))[1:-7]
+ d = v.view(np.dtype("S8"))
+ # unaligned source
+ x = (np.zeros(16, dtype=np.int8) + ord('a'))[1:-7]
+ x = x.view(np.dtype("S8"))
+ x[...] = np.array("b" * 8, dtype="S")
+ b = np.arange(d.size)
+ #trivial
+ assert_equal(d[b], d)
+ d[b] = x
+ # nontrivial
+ # unaligned index array
+ b = np.zeros(d.size + 1).view(np.int8)[1:-(np.intp(0).itemsize - 1)]
+ b = b.view(np.intp)[:d.size]
+ b[...] = np.arange(d.size)
+ assert_equal(d[b.astype(np.int16)], d)
+ d[b.astype(np.int16)] = x
+ # boolean
+ d[b % 2 == 0]
+ d[b % 2 == 0] = x[::2]
+
+ def test_tuple_subclass(self):
+ arr = np.ones((5, 5))
+
+ # A tuple subclass should also be an nd-index
+ class TupleSubclass(tuple):
+ pass
+ index = ([1], [1])
+ index = TupleSubclass(index)
+ assert_(arr[index].shape == (1,))
+ # Unlike the non nd-index:
+ assert_(arr[index,].shape != (1,))
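+
+        # Editor's gloss: tuples (and tuple subclasses) are the one sequence
+        # type treated as a multi-dimensional index, so ``arr[index]`` above
+        # means ``arr[[1], [1]]`` -- two fancy indices giving shape (1,) --
+        # whereas ``arr[index,]`` wraps it in another tuple, making the
+        # subclass a single fancy-index array along axis 0 with a different
+        # result shape.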
+
+ def test_broken_sequence_not_nd_index(self):
+ # See gh-5063:
+ # If we have an object which claims to be a sequence, but fails
+ # on item getting, this should not be converted to an nd-index (tuple)
+ # If this object happens to be a valid index otherwise, it should work
+ # This object here is very dubious and probably bad though:
+ class SequenceLike(object):
+ def __index__(self):
+ return 0
+
+ def __len__(self):
+ return 1
+
+ def __getitem__(self, item):
+ raise IndexError('Not possible')
+
+ arr = np.arange(10)
+ assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),])
+
+ # also test that field indexing does not segfault
+ # for a similar reason, by indexing a structured array
+ arr = np.zeros((1,), dtype=[('f1', 'i8'), ('f2', 'i8')])
+ assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),])
+
+ def test_indexing_array_weird_strides(self):
+ # See also gh-6221
+ # the shapes used here come from the issue and create the correct
+ # size for the iterator buffering size.
+ x = np.ones(10)
+ x2 = np.ones((10, 2))
+ ind = np.arange(10)[:, None, None, None]
+ ind = np.broadcast_to(ind, (10, 55, 4, 4))
+
+ # single advanced index case
+ assert_array_equal(x[ind], x[ind.copy()])
+ # higher dimensional advanced index
+ zind = np.zeros(4, dtype=np.intp)
+ assert_array_equal(x2[ind, zind], x2[ind.copy(), zind])
+
+ def test_indexing_array_negative_strides(self):
+ # From gh-8264,
+ # core dumps if negative strides are used in iteration
+ arro = np.zeros((4, 4))
+ arr = arro[::-1, ::-1]
+
+ slices = (slice(None), [0, 1, 2, 3])
+ arr[slices] = 10
+ assert_array_equal(arr, 10.)
+
+class TestFieldIndexing(object):
+ def test_scalar_return_type(self):
+ # Field access on an array should return an array, even if it
+ # is 0-d.
+ a = np.zeros((), [('a','f8')])
+ assert_(isinstance(a['a'], np.ndarray))
+ assert_(isinstance(a[['a']], np.ndarray))
+
+
+class TestBroadcastedAssignments(object):
+ def assign(self, a, ind, val):
+ a[ind] = val
+ return a
+
+ def test_prepending_ones(self):
+ a = np.zeros((3, 2))
+
+ a[...] = np.ones((1, 3, 2))
+ # Fancy with subspace with and without transpose
+ a[[0, 1, 2], :] = np.ones((1, 3, 2))
+ a[:, [0, 1]] = np.ones((1, 3, 2))
+ # Fancy without subspace (with broadcasting)
+ a[[[0], [1], [2]], [0, 1]] = np.ones((1, 3, 2))
+
+ def test_prepend_not_one(self):
+ assign = self.assign
+ s_ = np.s_
+ a = np.zeros(5)
+
+ # Too large and not only ones.
+ assert_raises(ValueError, assign, a, s_[...], np.ones((2, 1)))
+ assert_raises(ValueError, assign, a, s_[[1, 2, 3],], np.ones((2, 1)))
+ assert_raises(ValueError, assign, a, s_[[[1], [2]],], np.ones((2,2,1)))
+
+ def test_simple_broadcasting_errors(self):
+ assign = self.assign
+ s_ = np.s_
+ a = np.zeros((5, 1))
+
+ assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 2)))
+ assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 0)))
+ assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 2)))
+ assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 0)))
+ assert_raises(ValueError, assign, a, s_[[0], :], np.zeros((2, 1)))
+
+ def test_index_is_larger(self):
+ # Simple case of fancy index broadcasting of the index.
+ a = np.zeros((5, 5))
+ a[[[0], [1], [2]], [0, 1, 2]] = [2, 3, 4]
+
+ assert_((a[:3, :3] == [2, 3, 4]).all())
+
+ def test_broadcast_subspace(self):
+ a = np.zeros((100, 100))
+ v = np.arange(100)[:,None]
+ b = np.arange(100)[::-1]
+ a[b] = v
+ assert_((a[::-1] == v).all())
+
+
+class TestSubclasses(object):
+ def test_basic(self):
+ # Test that indexing in various ways produces SubClass instances,
+ # and that the base is set up correctly: the original subclass
+ # instance for views, and a new ndarray for advanced/boolean indexing
+ # where a copy was made (latter a regression test for gh-11983).
+ class SubClass(np.ndarray):
+ pass
+
+ a = np.arange(5)
+ s = a.view(SubClass)
+ s_slice = s[:3]
+ assert_(type(s_slice) is SubClass)
+ assert_(s_slice.base is s)
+ assert_array_equal(s_slice, a[:3])
+
+ s_fancy = s[[0, 1, 2]]
+ assert_(type(s_fancy) is SubClass)
+ assert_(s_fancy.base is not s)
+ assert_(type(s_fancy.base) is np.ndarray)
+ assert_array_equal(s_fancy, a[[0, 1, 2]])
+ assert_array_equal(s_fancy.base, a[[0, 1, 2]])
+
+ s_bool = s[s > 0]
+ assert_(type(s_bool) is SubClass)
+ assert_(s_bool.base is not s)
+ assert_(type(s_bool.base) is np.ndarray)
+ assert_array_equal(s_bool, a[a > 0])
+ assert_array_equal(s_bool.base, a[a > 0])
+
+ def test_finalize_gets_full_info(self):
+ # Array finalize should be called on the filled array.
+ class SubClass(np.ndarray):
+ def __array_finalize__(self, old):
+ self.finalize_status = np.array(self)
+ self.old = old
+
+ s = np.arange(10).view(SubClass)
+ new_s = s[:3]
+ assert_array_equal(new_s.finalize_status, new_s)
+ assert_array_equal(new_s.old, s)
+
+ new_s = s[[0,1,2,3]]
+ assert_array_equal(new_s.finalize_status, new_s)
+ assert_array_equal(new_s.old, s)
+
+ new_s = s[s > 0]
+ assert_array_equal(new_s.finalize_status, new_s)
+ assert_array_equal(new_s.old, s)
+
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+ def test_slice_decref_getsetslice(self):
+        # See gh-10066: a temporary slice object should be discarded.
+        # This test is only really interesting on Python 2 since
+        # it goes through `__set/getslice__` there and can probably be
+        # removed.  Use 0:7 to make sure it is never None:7.
+ class KeepIndexObject(np.ndarray):
+ def __getitem__(self, indx):
+ self.indx = indx
+ if indx == slice(0, 7):
+ raise ValueError
+
+ def __setitem__(self, indx, val):
+ self.indx = indx
+ if indx == slice(0, 4):
+ raise ValueError
+
+ k = np.array([1]).view(KeepIndexObject)
+ k[0:5]
+ assert_equal(k.indx, slice(0, 5))
+ assert_equal(sys.getrefcount(k.indx), 2)
+ try:
+ k[0:7]
+ raise AssertionError
+ except ValueError:
+ # The exception holds a reference to the slice so clear on Py2
+ if hasattr(sys, 'exc_clear'):
+ with suppress_warnings() as sup:
+ sup.filter(DeprecationWarning)
+ sys.exc_clear()
+ assert_equal(k.indx, slice(0, 7))
+ assert_equal(sys.getrefcount(k.indx), 2)
+
+ k[0:3] = 6
+ assert_equal(k.indx, slice(0, 3))
+ assert_equal(sys.getrefcount(k.indx), 2)
+ try:
+ k[0:4] = 2
+ raise AssertionError
+ except ValueError:
+ # The exception holds a reference to the slice so clear on Py2
+ if hasattr(sys, 'exc_clear'):
+ with suppress_warnings() as sup:
+ sup.filter(DeprecationWarning)
+ sys.exc_clear()
+ assert_equal(k.indx, slice(0, 4))
+ assert_equal(sys.getrefcount(k.indx), 2)
+
+
+class TestFancyIndexingCast(object):
+ def test_boolean_index_cast_assign(self):
+ # Setup the boolean index and float arrays.
+ shape = (8, 63)
+ bool_index = np.zeros(shape).astype(bool)
+ bool_index[0, 1] = True
+ zero_array = np.zeros(shape)
+
+ # Assigning float is fine.
+ zero_array[bool_index] = np.array([1])
+ assert_equal(zero_array[0, 1], 1)
+
+ # Fancy indexing works, although we get a cast warning.
+ assert_warns(np.ComplexWarning,
+ zero_array.__setitem__, ([0], [1]), np.array([2 + 1j]))
+ assert_equal(zero_array[0, 1], 2) # No complex part
+
+ # Cast complex to float, throwing away the imaginary portion.
+ assert_warns(np.ComplexWarning,
+ zero_array.__setitem__, bool_index, np.array([1j]))
+ assert_equal(zero_array[0, 1], 0)
+
+class TestFancyIndexingEquivalence(object):
+ def test_object_assign(self):
+ # Check that the field and object special case using copyto is active.
+ # The right hand side cannot be converted to an array here.
+ a = np.arange(5, dtype=object)
+ b = a.copy()
+ a[:3] = [1, (1,2), 3]
+ b[[0, 1, 2]] = [1, (1,2), 3]
+ assert_array_equal(a, b)
+
+ # test same for subspace fancy indexing
+ b = np.arange(5, dtype=object)[None, :]
+ b[[0], :3] = [[1, (1,2), 3]]
+ assert_array_equal(a, b[0])
+
+ # Check that swapping of axes works.
+        # There was a bug that made the later assignment throw a ValueError
+        # due to an incorrectly transposed temporary right-hand side (gh-5714)
+ b = b.T
+ b[:3, [0]] = [[1], [(1,2)], [3]]
+ assert_array_equal(a, b[:, 0])
+
+ # Another test for the memory order of the subspace
+ arr = np.ones((3, 4, 5), dtype=object)
+ # Equivalent slicing assignment for comparison
+ cmp_arr = arr.copy()
+ cmp_arr[:1, ...] = [[[1], [2], [3], [4]]]
+ arr[[0], ...] = [[[1], [2], [3], [4]]]
+ assert_array_equal(arr, cmp_arr)
+ arr = arr.copy('F')
+ arr[[0], ...] = [[[1], [2], [3], [4]]]
+ assert_array_equal(arr, cmp_arr)
+
+ def test_cast_equivalence(self):
+ # Yes, normal slicing uses unsafe casting.
+ a = np.arange(5)
+ b = a.copy()
+
+ a[:3] = np.array(['2', '-3', '-1'])
+ b[[0, 2, 1]] = np.array(['2', '-1', '-3'])
+ assert_array_equal(a, b)
+
+ # test the same for subspace fancy indexing
+ b = np.arange(5)[None, :]
+ b[[0], :3] = np.array([['2', '-3', '-1']])
+ assert_array_equal(a, b[0])
+
+
+class TestMultiIndexingAutomated(object):
+ """
+ These tests use code to mimic the C-Code indexing for selection.
+
+ NOTE:
+
+ * This still lacks tests for complex item setting.
+ * If you change behavior of indexing, you might want to modify
+ these tests to try more combinations.
+ * Behavior was written to match numpy version 1.8. (though a
+ first version matched 1.7.)
+ * Only tuple indices are supported by the mimicking code.
+ (and tested as of writing this)
+ * Error types should match most of the time as long as there
+ is only one error. For multiple errors, what gets raised
+ will usually not be the same one. They are *not* tested.
+
+ Update 2016-11-30: It is probably not worth maintaining this test
+ indefinitely and it can be dropped if maintenance becomes a burden.
+
+ """
+
+ def setup(self):
+ self.a = np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6)
+ self.b = np.empty((3, 0, 5, 6))
+ self.complex_indices = ['skip', Ellipsis,
+ 0,
+ # Boolean indices, up to 3-d for some special cases of eating up
+ # dimensions, also need to test all False
+ np.array([True, False, False]),
+ np.array([[True, False], [False, True]]),
+ np.array([[[False, False], [False, False]]]),
+ # Some slices:
+ slice(-5, 5, 2),
+ slice(1, 1, 100),
+ slice(4, -1, -2),
+ slice(None, None, -3),
+ # Some Fancy indexes:
+ np.empty((0, 1, 1), dtype=np.intp), # empty and can be broadcast
+ np.array([0, 1, -2]),
+ np.array([[2], [0], [1]]),
+ np.array([[0, -1], [0, 1]], dtype=np.dtype('intp').newbyteorder()),
+ np.array([2, -1], dtype=np.int8),
+ np.zeros([1]*31, dtype=int), # trigger too large array.
+ np.array([0., 1.])] # invalid datatype
+ # Some simpler indices that still cover a bit more
+ self.simple_indices = [Ellipsis, None, -1, [1], np.array([True]),
+ 'skip']
+ # Very simple ones to fill the rest:
+ self.fill_indices = [slice(None, None), 0]
+
+ def _get_multi_index(self, arr, indices):
+ """Mimic multi dimensional indexing.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Array to be indexed.
+ indices : tuple of index objects
+
+ Returns
+ -------
+ out : ndarray
+ An array equivalent to the indexing operation (but always a copy).
+ `arr[indices]` should be identical.
+ no_copy : bool
+ Whether the indexing operation requires a copy. If this is `True`,
+ `np.may_share_memory(arr, arr[indices])` should be `True` (with
+ some exceptions for scalars and possibly 0-d arrays).
+
+ Notes
+ -----
+        While the function mostly matches the errors of normal indexing, the
+        exact error raised is not guaranteed to be the same in every case.
+ """
+ in_indices = list(indices)
+ indices = []
+ # if False, this is a fancy or boolean index
+ no_copy = True
+ # number of fancy/scalar indexes that are not consecutive
+ num_fancy = 0
+ # number of dimensions indexed by a "fancy" index
+ fancy_dim = 0
+ # NOTE: This is a funny twist (and probably OK to change).
+ # The boolean array has illegal indexes, but this is
+ # allowed if the broadcast fancy-indices are 0-sized.
+ # This variable is to catch that case.
+ error_unless_broadcast_to_empty = False
+
+ # We need to handle Ellipsis and make arrays from indices, also
+ # check if this is fancy indexing (set no_copy).
+ ndim = 0
+ ellipsis_pos = None # define here mostly to replace all but first.
+ for i, indx in enumerate(in_indices):
+ if indx is None:
+ continue
+ if isinstance(indx, np.ndarray) and indx.dtype == bool:
+ no_copy = False
+ if indx.ndim == 0:
+ raise IndexError
+ # boolean indices can have higher dimensions
+ ndim += indx.ndim
+ fancy_dim += indx.ndim
+ continue
+ if indx is Ellipsis:
+ if ellipsis_pos is None:
+ ellipsis_pos = i
+ continue # do not increment ndim counter
+ raise IndexError
+ if isinstance(indx, slice):
+ ndim += 1
+ continue
+ if not isinstance(indx, np.ndarray):
+ # This could be open for changes in numpy.
+ # numpy should maybe raise an error if casting to intp
+ # is not safe. It rejects np.array([1., 2.]) but not
+            # [1., 2.] as index (same for e.g. np.take).
+ # (Note the importance of empty lists if changing this here)
+ try:
+ indx = np.array(indx, dtype=np.intp)
+ except ValueError:
+ raise IndexError
+ in_indices[i] = indx
+ elif indx.dtype.kind != 'b' and indx.dtype.kind != 'i':
+ raise IndexError('arrays used as indices must be of '
+ 'integer (or boolean) type')
+ if indx.ndim != 0:
+ no_copy = False
+ ndim += 1
+ fancy_dim += 1
+
+ if arr.ndim - ndim < 0:
+            # We can't take more dimensions than we have, not even for 0-d
+            # arrays, since a[()] makes sense but a[(),] does not. We will
+            # raise an error later on, unless a broadcasting error occurs
+            # first.
+ raise IndexError
+
+ if ndim == 0 and None not in in_indices:
+ # Well we have no indexes or one Ellipsis. This is legal.
+ return arr.copy(), no_copy
+
+ if ellipsis_pos is not None:
+ in_indices[ellipsis_pos:ellipsis_pos+1] = ([slice(None, None)] *
+ (arr.ndim - ndim))
+
+ for ax, indx in enumerate(in_indices):
+ if isinstance(indx, slice):
+ # convert to an index array
+ indx = np.arange(*indx.indices(arr.shape[ax]))
+ indices.append(['s', indx])
+ continue
+ elif indx is None:
+ # this is like taking a slice with one element from a new axis:
+ indices.append(['n', np.array([0], dtype=np.intp)])
+ arr = arr.reshape((arr.shape[:ax] + (1,) + arr.shape[ax:]))
+ continue
+ if isinstance(indx, np.ndarray) and indx.dtype == bool:
+ if indx.shape != arr.shape[ax:ax+indx.ndim]:
+ raise IndexError
+
+ try:
+ flat_indx = np.ravel_multi_index(np.nonzero(indx),
+ arr.shape[ax:ax+indx.ndim], mode='raise')
+ except Exception:
+ error_unless_broadcast_to_empty = True
+ # fill with 0s instead, and raise error later
+ flat_indx = np.array([0]*indx.sum(), dtype=np.intp)
+ # concatenate axis into a single one:
+ if indx.ndim != 0:
+ arr = arr.reshape((arr.shape[:ax]
+ + (np.prod(arr.shape[ax:ax+indx.ndim]),)
+ + arr.shape[ax+indx.ndim:]))
+ indx = flat_indx
+ else:
+ # This could be changed, a 0-d boolean index can
+ # make sense (even outside the 0-d indexed array case)
+                # Note that originally this could be interpreted as an
+                # integer in the full integer special case.
+ raise IndexError
+ else:
+ # If the index is a singleton, the bounds check is done
+ # before the broadcasting. This used to be different in <1.9
+ if indx.ndim == 0:
+ if indx >= arr.shape[ax] or indx < -arr.shape[ax]:
+ raise IndexError
+ if indx.ndim == 0:
+ # The index is a scalar. This used to be two fold, but if
+ # fancy indexing was active, the check was done later,
+ # possibly after broadcasting it away (1.7. or earlier).
+ # Now it is always done.
+ if indx >= arr.shape[ax] or indx < - arr.shape[ax]:
+ raise IndexError
+ if (len(indices) > 0 and
+ indices[-1][0] == 'f' and
+ ax != ellipsis_pos):
+ # NOTE: There could still have been a 0-sized Ellipsis
+ # between them. Checked that with ellipsis_pos.
+ indices[-1].append(indx)
+ else:
+ # We have a fancy index that is not after an existing one.
+ # NOTE: A 0-d array triggers this as well, while one may
+ # expect it to not trigger it, since a scalar would not be
+ # considered fancy indexing.
+ num_fancy += 1
+ indices.append(['f', indx])
+
+ if num_fancy > 1 and not no_copy:
+            # We have to move all fancy indexes to the front (left):
+ new_indices = indices[:]
+ axes = list(range(arr.ndim))
+ fancy_axes = []
+ new_indices.insert(0, ['f'])
+ ni = 0
+ ai = 0
+ for indx in indices:
+ ni += 1
+ if indx[0] == 'f':
+ new_indices[0].extend(indx[1:])
+ del new_indices[ni]
+ ni -= 1
+ for ax in range(ai, ai + len(indx[1:])):
+ fancy_axes.append(ax)
+ axes.remove(ax)
+ ai += len(indx) - 1 # axis we are at
+ indices = new_indices
+ # and now we need to transpose arr:
+ arr = arr.transpose(*(fancy_axes + axes))
+
+ # We only have one 'f' index now and arr is transposed accordingly.
+ # Now handle newaxis by reshaping...
+ ax = 0
+ for indx in indices:
+ if indx[0] == 'f':
+ if len(indx) == 1:
+ continue
+ # First of all, reshape arr to combine fancy axes into one:
+ orig_shape = arr.shape
+ orig_slice = orig_shape[ax:ax + len(indx[1:])]
+ arr = arr.reshape((arr.shape[:ax]
+ + (np.prod(orig_slice).astype(int),)
+ + arr.shape[ax + len(indx[1:]):]))
+
+ # Check if broadcasting works
+ res = np.broadcast(*indx[1:])
+ # unfortunately the indices might be out of bounds. So check
+ # that first, and use mode='wrap' then. However only if
+ # there are any indices...
+ if res.size != 0:
+ if error_unless_broadcast_to_empty:
+ raise IndexError
+ for _indx, _size in zip(indx[1:], orig_slice):
+ if _indx.size == 0:
+ continue
+ if np.any(_indx >= _size) or np.any(_indx < -_size):
+ raise IndexError
+ if len(indx[1:]) == len(orig_slice):
+ if np.product(orig_slice) == 0:
+ # Work around for a crash or IndexError with 'wrap'
+ # in some 0-sized cases.
+ try:
+ mi = np.ravel_multi_index(indx[1:], orig_slice,
+ mode='raise')
+ except Exception:
+ # This happens with 0-sized orig_slice (sometimes?)
+ # here it is a ValueError, but indexing gives a:
+ raise IndexError('invalid index into 0-sized')
+ else:
+ mi = np.ravel_multi_index(indx[1:], orig_slice,
+ mode='wrap')
+ else:
+ # Maybe never happens...
+ raise ValueError
+ arr = arr.take(mi.ravel(), axis=ax)
+ try:
+ arr = arr.reshape((arr.shape[:ax]
+ + mi.shape
+ + arr.shape[ax+1:]))
+ except ValueError:
+ # too many dimensions, probably
+ raise IndexError
+ ax += mi.ndim
+ continue
+
+ # If we are here, we have a 1D array for take:
+ arr = arr.take(indx[1], axis=ax)
+ ax += 1
+
+ return arr, no_copy
+
+ def _check_multi_index(self, arr, index):
+ """Check a multi index item getting and simple setting.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Array to be indexed, must be a reshaped arange.
+ index : tuple of indexing objects
+ Index being tested.
+ """
+ # Test item getting
+ try:
+ mimic_get, no_copy = self._get_multi_index(arr, index)
+ except Exception as e:
+ if HAS_REFCOUNT:
+ prev_refcount = sys.getrefcount(arr)
+ assert_raises(type(e), arr.__getitem__, index)
+ assert_raises(type(e), arr.__setitem__, index, 0)
+ if HAS_REFCOUNT:
+ assert_equal(prev_refcount, sys.getrefcount(arr))
+ return
+
+ self._compare_index_result(arr, index, mimic_get, no_copy)
+
+ def _check_single_index(self, arr, index):
+ """Check a single index item getting and simple setting.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Array to be indexed, must be an arange.
+ index : indexing object
+ Index being tested. Must be a single index and not a tuple
+ of indexing objects (see also `_check_multi_index`).
+ """
+ try:
+ mimic_get, no_copy = self._get_multi_index(arr, (index,))
+ except Exception as e:
+ if HAS_REFCOUNT:
+ prev_refcount = sys.getrefcount(arr)
+ assert_raises(type(e), arr.__getitem__, index)
+ assert_raises(type(e), arr.__setitem__, index, 0)
+ if HAS_REFCOUNT:
+ assert_equal(prev_refcount, sys.getrefcount(arr))
+ return
+
+ self._compare_index_result(arr, index, mimic_get, no_copy)
+
+ def _compare_index_result(self, arr, index, mimic_get, no_copy):
+ """Compare mimicked result to indexing result.
+ """
+ arr = arr.copy()
+ indexed_arr = arr[index]
+ assert_array_equal(indexed_arr, mimic_get)
+        # Check if we got a view, unless it's a 0-sized or 0-d array
+        # (then it's not a view, and that does not matter).
+ if indexed_arr.size != 0 and indexed_arr.ndim != 0:
+ assert_(np.may_share_memory(indexed_arr, arr) == no_copy)
+ # Check reference count of the original array
+ if HAS_REFCOUNT:
+ if no_copy:
+ # refcount increases by one:
+ assert_equal(sys.getrefcount(arr), 3)
+ else:
+ assert_equal(sys.getrefcount(arr), 2)
+
+ # Test non-broadcast setitem:
+ b = arr.copy()
+ b[index] = mimic_get + 1000
+ if b.size == 0:
+ return # nothing to compare here...
+ if no_copy and indexed_arr.ndim != 0:
+ # change indexed_arr in-place to manipulate original:
+ indexed_arr += 1000
+ assert_array_equal(arr, b)
+ return
+ # Use the fact that the array is originally an arange:
+ arr.flat[indexed_arr.ravel()] += 1000
+ assert_array_equal(arr, b)
+
+ def test_boolean(self):
+ a = np.array(5)
+ assert_equal(a[np.array(True)], 5)
+ a[np.array(True)] = 1
+ assert_equal(a, 1)
+        # NOTE: This is different from normal broadcasting, as
+        # arr[boolean_array] works like a multi-index, which means
+        # it is aligned to the left. This is probably correct for
+        # consistency with arr[boolean_array,]; also, no broadcasting
+        # is done at all.
+ self._check_multi_index(
+ self.a, (np.zeros_like(self.a, dtype=bool),))
+ self._check_multi_index(
+ self.a, (np.zeros_like(self.a, dtype=bool)[..., 0],))
+ self._check_multi_index(
+ self.a, (np.zeros_like(self.a, dtype=bool)[None, ...],))
+
+ def test_multidim(self):
+ # Automatically test combinations with complex indexes on 2nd (or 1st)
+ # spot and the simple ones in one other spot.
+ with warnings.catch_warnings():
+ # This is so that np.array(True) is not accepted in a full integer
+ # index, when running the file separately.
+ warnings.filterwarnings('error', '', DeprecationWarning)
+ warnings.filterwarnings('error', '', np.VisibleDeprecationWarning)
+
+ def isskip(idx):
+ return isinstance(idx, str) and idx == "skip"
+
+ for simple_pos in [0, 2, 3]:
+ tocheck = [self.fill_indices, self.complex_indices,
+ self.fill_indices, self.fill_indices]
+ tocheck[simple_pos] = self.simple_indices
+ for index in product(*tocheck):
+ index = tuple(i for i in index if not isskip(i))
+ self._check_multi_index(self.a, index)
+ self._check_multi_index(self.b, index)
+
+ # Check very simple item getting:
+ self._check_multi_index(self.a, (0, 0, 0, 0))
+ self._check_multi_index(self.b, (0, 0, 0, 0))
+ # Also check (simple cases of) too many indices:
+ assert_raises(IndexError, self.a.__getitem__, (0, 0, 0, 0, 0))
+ assert_raises(IndexError, self.a.__setitem__, (0, 0, 0, 0, 0), 0)
+ assert_raises(IndexError, self.a.__getitem__, (0, 0, [1], 0, 0))
+ assert_raises(IndexError, self.a.__setitem__, (0, 0, [1], 0, 0), 0)
+
+ def test_1d(self):
+ a = np.arange(10)
+ for index in self.complex_indices:
+ self._check_single_index(a, index)
+
+class TestFloatNonIntegerArgument(object):
+ """
+    These test that ``TypeError`` is raised when you try to use
+    non-integers as arguments for indexing and slicing, e.g. ``a[0.0:5]``
+    and ``a[0.5]``, or in other functions like ``array.reshape(1., -1)``.
+
+ """
+ def test_valid_indexing(self):
+ # These should raise no errors.
+ a = np.array([[[5]]])
+
+ a[np.array([0])]
+ a[[0, 0]]
+ a[:, [0, 0]]
+ a[:, 0,:]
+ a[:,:,:]
+
+ def test_valid_slicing(self):
+ # These should raise no errors.
+ a = np.array([[[5]]])
+
+ a[::]
+ a[0:]
+ a[:2]
+ a[0:2]
+ a[::2]
+ a[1::2]
+ a[:2:2]
+ a[1:2:2]
+
+ def test_non_integer_argument_errors(self):
+ a = np.array([[5]])
+
+ assert_raises(TypeError, np.reshape, a, (1., 1., -1))
+ assert_raises(TypeError, np.reshape, a, (np.array(1.), -1))
+ assert_raises(TypeError, np.take, a, [0], 1.)
+ assert_raises(TypeError, np.take, a, [0], np.float64(1.))
+
+ def test_non_integer_sequence_multiplication(self):
+ # NumPy scalar sequence multiply should not work with non-integers
+ def mult(a, b):
+ return a * b
+
+ assert_raises(TypeError, mult, [1], np.float_(3))
+ # following should be OK
+ mult([1], np.int_(3))
+
+ def test_reduce_axis_float_index(self):
+ d = np.zeros((3,3,3))
+ assert_raises(TypeError, np.min, d, 0.5)
+ assert_raises(TypeError, np.min, d, (0.5, 1))
+ assert_raises(TypeError, np.min, d, (1, 2.2))
+ assert_raises(TypeError, np.min, d, (.2, 1.2))
+
+
+class TestBooleanIndexing(object):
+    # Using a boolean as an integer argument or as an index is an error.
+ def test_bool_as_int_argument_errors(self):
+ a = np.array([[[1]]])
+
+ assert_raises(TypeError, np.reshape, a, (True, -1))
+ assert_raises(TypeError, np.reshape, a, (np.bool_(True), -1))
+ # Note that operator.index(np.array(True)) does not work, a boolean
+ # array is thus also deprecated, but not with the same message:
+ assert_raises(TypeError, operator.index, np.array(True))
+ assert_warns(DeprecationWarning, operator.index, np.True_)
+ assert_raises(TypeError, np.take, args=(a, [0], False))
+
+ def test_boolean_indexing_weirdness(self):
+ # Weird boolean indexing things
+ a = np.ones((2, 3, 4))
+        assert_(a[False, True, ...].shape == (0, 2, 3, 4))
+        assert_(a[True, [0, 1], True, True, [1], [[2]]].shape == (1, 2))
+ assert_raises(IndexError, lambda: a[False, [0, 1], ...])
+
+
+class TestArrayToIndexDeprecation(object):
+ """Creating an an index from array not 0-D is an error.
+
+ """
+ def test_array_to_index_error(self):
+        # These raise TypeError; the raising is effectively tested above.
+ a = np.array([[[1]]])
+
+ assert_raises(TypeError, operator.index, np.array([1]))
+ assert_raises(TypeError, np.reshape, a, (a, -1))
+ assert_raises(TypeError, np.take, a, [0], a)
+
+
+class TestNonIntegerArrayLike(object):
+ """Tests that array_likes only valid if can safely cast to integer.
+
+ For instance, lists give IndexError when they cannot be safely cast to
+ an integer.
+
+ """
+ def test_basic(self):
+ a = np.arange(10)
+
+ assert_raises(IndexError, a.__getitem__, [0.5, 1.5])
+ assert_raises(IndexError, a.__getitem__, (['1', '2'],))
+
+ # The following is valid
+ a.__getitem__([])
+
+
+class TestMultipleEllipsisError(object):
+ """An index can only have a single ellipsis.
+
+ """
+ def test_basic(self):
+ a = np.arange(10)
+ assert_raises(IndexError, lambda: a[..., ...])
+ assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 2,))
+ assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 3,))
+
+
+class TestCApiAccess(object):
+ def test_getitem(self):
+ subscript = functools.partial(array_indexing, 0)
+
+ # 0-d arrays don't work:
+ assert_raises(IndexError, subscript, np.ones(()), 0)
+ # Out of bound values:
+ assert_raises(IndexError, subscript, np.ones(10), 11)
+ assert_raises(IndexError, subscript, np.ones(10), -11)
+ assert_raises(IndexError, subscript, np.ones((10, 10)), 11)
+ assert_raises(IndexError, subscript, np.ones((10, 10)), -11)
+
+ a = np.arange(10)
+ assert_array_equal(a[4], subscript(a, 4))
+ a = a.reshape(5, 2)
+ assert_array_equal(a[-4], subscript(a, -4))
+
+ def test_setitem(self):
+ assign = functools.partial(array_indexing, 1)
+
+ # Deletion is impossible:
+ assert_raises(ValueError, assign, np.ones(10), 0)
+ # 0-d arrays don't work:
+ assert_raises(IndexError, assign, np.ones(()), 0, 0)
+ # Out of bound values:
+ assert_raises(IndexError, assign, np.ones(10), 11, 0)
+ assert_raises(IndexError, assign, np.ones(10), -11, 0)
+ assert_raises(IndexError, assign, np.ones((10, 10)), 11, 0)
+ assert_raises(IndexError, assign, np.ones((10, 10)), -11, 0)
+
+ a = np.arange(10)
+ assign(a, 4, 10)
+ assert_(a[4] == 10)
+
+ a = a.reshape(5, 2)
+ assign(a, 4, 10)
+ assert_array_equal(a[-1], [10, 10])
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_indexing.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_indexing.pyc
new file mode 100644
index 0000000..2c6bb27
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_indexing.pyc differ
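
The indexing tests above all revolve around one contract: basic slicing returns a view, while fancy (integer-array or boolean) indexing returns a copy. A minimal sketch of that distinction, using only public NumPy calls (the variable names are illustrative, not taken from the diff):

    import numpy as np

    arr = np.arange(12).reshape(3, 4)
    view = arr[:, ::2]       # basic slicing -> a view, shares memory
    copy = arr[:, [0, 2]]    # fancy indexing -> a copy, same values
    assert np.may_share_memory(arr, view)
    assert not np.may_share_memory(arr, copy)

This is the same view/copy contract that TestMultiIndexingAutomated mimics in pure Python and compares against the C implementation.
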
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_item_selection.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_item_selection.py
new file mode 100644
index 0000000..3bc24fc
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_item_selection.py
@@ -0,0 +1,87 @@
+from __future__ import division, absolute_import, print_function
+
+import sys
+
+import numpy as np
+from numpy.testing import (
+ assert_, assert_raises, assert_array_equal, HAS_REFCOUNT
+ )
+
+
+class TestTake(object):
+ def test_simple(self):
+ a = [[1, 2], [3, 4]]
+ a_str = [[b'1', b'2'], [b'3', b'4']]
+ modes = ['raise', 'wrap', 'clip']
+ indices = [-1, 4]
+ index_arrays = [np.empty(0, dtype=np.intp),
+ np.empty(tuple(), dtype=np.intp),
+ np.empty((1, 1), dtype=np.intp)]
+ real_indices = {'raise': {-1: 1, 4: IndexError},
+ 'wrap': {-1: 1, 4: 0},
+ 'clip': {-1: 0, 4: 1}}
+        # Currently all types but object use the same function generation,
+        # so it should not be necessary to test all of them. However, also
+        # test a non-refcounted struct in addition to object.
+ types = int, object, np.dtype([('', 'i', 2)])
+ for t in types:
+            # ta works, even though the array may be odd if the buffer interface is used
+ ta = np.array(a if np.issubdtype(t, np.number) else a_str, dtype=t)
+ tresult = list(ta.T.copy())
+ for index_array in index_arrays:
+ if index_array.size != 0:
+ tresult[0].shape = (2,) + index_array.shape
+ tresult[1].shape = (2,) + index_array.shape
+ for mode in modes:
+ for index in indices:
+ real_index = real_indices[mode][index]
+ if real_index is IndexError and index_array.size != 0:
+ index_array.put(0, index)
+ assert_raises(IndexError, ta.take, index_array,
+ mode=mode, axis=1)
+ elif index_array.size != 0:
+ index_array.put(0, index)
+ res = ta.take(index_array, mode=mode, axis=1)
+ assert_array_equal(res, tresult[real_index])
+ else:
+ res = ta.take(index_array, mode=mode, axis=1)
+ assert_(res.shape == (2,) + index_array.shape)
+
+ def test_refcounting(self):
+ objects = [object() for i in range(10)]
+ for mode in ('raise', 'clip', 'wrap'):
+ a = np.array(objects)
+ b = np.array([2, 2, 4, 5, 3, 5])
+ a.take(b, out=a[:6], mode=mode)
+ del a
+ if HAS_REFCOUNT:
+ assert_(all(sys.getrefcount(o) == 3 for o in objects))
+ # not contiguous, example:
+ a = np.array(objects * 2)[::2]
+ a.take(b, out=a[:6], mode=mode)
+ del a
+ if HAS_REFCOUNT:
+ assert_(all(sys.getrefcount(o) == 3 for o in objects))
+
+ def test_unicode_mode(self):
+ d = np.arange(10)
+ k = b'\xc3\xa4'.decode("UTF8")
+ assert_raises(ValueError, d.take, 5, mode=k)
+
+ def test_empty_partition(self):
+ # In reference to github issue #6530
+ a_original = np.array([0, 2, 4, 6, 8, 10])
+ a = a_original.copy()
+
+ # An empty partition should be a successful no-op
+ a.partition(np.array([], dtype=np.int16))
+
+ assert_array_equal(a, a_original)
+
+ def test_empty_argpartition(self):
+ # In reference to github issue #6530
+ a = np.array([0, 2, 4, 6, 8, 10])
+ a = a.argpartition(np.array([], dtype=np.int16))
+
+ b = np.array([0, 1, 2, 3, 4, 5])
+ assert_array_equal(a, b)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_item_selection.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_item_selection.pyc
new file mode 100644
index 0000000..f2edaf1
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_item_selection.pyc differ
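
TestTake above drives ndarray.take through its three out-of-bounds modes. A quick sketch of what those modes mean, with illustrative values:

    import numpy as np

    a = np.array([10, 20, 30])
    print(a.take([4], mode='wrap'))  # [20]: index 4 wraps to 4 % 3 == 1
    print(a.take([4], mode='clip'))  # [30]: index 4 is clipped to 2
    # mode='raise' (the default) raises IndexError for out-of-bounds 4
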
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_longdouble.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_longdouble.py
new file mode 100644
index 0000000..cf50d5d
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_longdouble.py
@@ -0,0 +1,207 @@
+from __future__ import division, absolute_import, print_function
+
+import pytest
+
+import numpy as np
+from numpy.testing import (
+ assert_, assert_equal, assert_raises, assert_array_equal, temppath,
+ )
+from numpy.core.tests._locales import CommaDecimalPointLocale
+
+LD_INFO = np.finfo(np.longdouble)
+longdouble_longer_than_double = (LD_INFO.eps < np.finfo(np.double).eps)
+
+
+_o = 1 + LD_INFO.eps
+string_to_longdouble_inaccurate = (_o != np.longdouble(repr(_o)))
+del _o
+
+
+def test_scalar_extraction():
+ """Confirm that extracting a value doesn't convert to python float"""
+ o = 1 + LD_INFO.eps
+ a = np.array([o, o, o])
+ assert_equal(a[1], o)
+
+
+# Conversions string -> long double
+
+# 0.1 not exactly representable in base 2 floating point.
+repr_precision = len(repr(np.longdouble(0.1)))
+# +2 from macro block starting around line 842 in scalartypes.c.src.
+@pytest.mark.skipif(LD_INFO.precision + 2 >= repr_precision,
+ reason="repr precision not enough to show eps")
+def test_repr_roundtrip():
+ # We will only see eps in repr if within printing precision.
+ o = 1 + LD_INFO.eps
+ assert_equal(np.longdouble(repr(o)), o, "repr was %s" % repr(o))
+
+
+def test_unicode():
+ np.longdouble(u"1.2")
+
+
+def test_string():
+ np.longdouble("1.2")
+
+
+def test_bytes():
+ np.longdouble(b"1.2")
+
+
+@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l")
+def test_repr_roundtrip_bytes():
+ o = 1 + LD_INFO.eps
+ assert_equal(np.longdouble(repr(o).encode("ascii")), o)
+
+
+def test_bogus_string():
+ assert_raises(ValueError, np.longdouble, "spam")
+ assert_raises(ValueError, np.longdouble, "1.0 flub")
+
+
+@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l")
+def test_fromstring():
+ o = 1 + LD_INFO.eps
+ s = (" " + repr(o))*5
+ a = np.array([o]*5)
+ assert_equal(np.fromstring(s, sep=" ", dtype=np.longdouble), a,
+ err_msg="reading '%s'" % s)
+
+
+def test_fromstring_bogus():
+ assert_equal(np.fromstring("1. 2. 3. flop 4.", dtype=float, sep=" "),
+ np.array([1., 2., 3.]))
+
+
+def test_fromstring_empty():
+ assert_equal(np.fromstring("xxxxx", sep="x"),
+ np.array([]))
+
+
+def test_fromstring_missing():
+ assert_equal(np.fromstring("1xx3x4x5x6", sep="x"),
+ np.array([1]))
+
+
+class TestFileBased(object):
+
+ ldbl = 1 + LD_INFO.eps
+ tgt = np.array([ldbl]*5)
+ out = ''.join([repr(t) + '\n' for t in tgt])
+
+ def test_fromfile_bogus(self):
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write("1. 2. 3. flop 4.\n")
+ res = np.fromfile(path, dtype=float, sep=" ")
+ assert_equal(res, np.array([1., 2., 3.]))
+
+ @pytest.mark.skipif(string_to_longdouble_inaccurate,
+ reason="Need strtold_l")
+ def test_fromfile(self):
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write(self.out)
+ res = np.fromfile(path, dtype=np.longdouble, sep="\n")
+ assert_equal(res, self.tgt)
+
+ @pytest.mark.skipif(string_to_longdouble_inaccurate,
+ reason="Need strtold_l")
+ def test_genfromtxt(self):
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write(self.out)
+ res = np.genfromtxt(path, dtype=np.longdouble)
+ assert_equal(res, self.tgt)
+
+ @pytest.mark.skipif(string_to_longdouble_inaccurate,
+ reason="Need strtold_l")
+ def test_loadtxt(self):
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write(self.out)
+ res = np.loadtxt(path, dtype=np.longdouble)
+ assert_equal(res, self.tgt)
+
+ @pytest.mark.skipif(string_to_longdouble_inaccurate,
+ reason="Need strtold_l")
+ def test_tofile_roundtrip(self):
+ with temppath() as path:
+ self.tgt.tofile(path, sep=" ")
+ res = np.fromfile(path, dtype=np.longdouble, sep=" ")
+ assert_equal(res, self.tgt)
+
+
+# Conversions long double -> string
+
+
+def test_repr_exact():
+ o = 1 + LD_INFO.eps
+ assert_(repr(o) != '1')
+
+
+@pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376")
+@pytest.mark.skipif(string_to_longdouble_inaccurate,
+ reason="Need strtold_l")
+def test_format():
+ o = 1 + LD_INFO.eps
+ assert_("{0:.40g}".format(o) != '1')
+
+
+@pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376")
+@pytest.mark.skipif(string_to_longdouble_inaccurate,
+ reason="Need strtold_l")
+def test_percent():
+ o = 1 + LD_INFO.eps
+ assert_("%.40g" % o != '1')
+
+
+@pytest.mark.skipif(longdouble_longer_than_double,
+ reason="array repr problem")
+@pytest.mark.skipif(string_to_longdouble_inaccurate,
+ reason="Need strtold_l")
+def test_array_repr():
+ o = 1 + LD_INFO.eps
+ a = np.array([o])
+ b = np.array([1], dtype=np.longdouble)
+ if not np.all(a != b):
+ raise ValueError("precision loss creating arrays")
+ assert_(repr(a) != repr(b))
+
+#
+# Locale tests: scalar types formatting should be independent of the locale
+#
+
+class TestCommaDecimalPointLocale(CommaDecimalPointLocale):
+
+ def test_repr_roundtrip_foreign(self):
+ o = 1.5
+ assert_equal(o, np.longdouble(repr(o)))
+
+ def test_fromstring_foreign_repr(self):
+ f = 1.234
+ a = np.fromstring(repr(f), dtype=float, sep=" ")
+ assert_equal(a[0], f)
+
+ def test_fromstring_best_effort_float(self):
+ assert_equal(np.fromstring("1,234", dtype=float, sep=" "),
+ np.array([1.]))
+
+ def test_fromstring_best_effort(self):
+ assert_equal(np.fromstring("1,234", dtype=np.longdouble, sep=" "),
+ np.array([1.]))
+
+ def test_fromstring_foreign(self):
+ s = "1.234"
+ a = np.fromstring(s, dtype=np.longdouble, sep=" ")
+ assert_equal(a[0], np.longdouble(s))
+
+ def test_fromstring_foreign_sep(self):
+ a = np.array([1, 2, 3, 4])
+ b = np.fromstring("1,2,3,4,", dtype=np.longdouble, sep=",")
+ assert_array_equal(a, b)
+
+ def test_fromstring_foreign_value(self):
+ b = np.fromstring("1,234", dtype=np.longdouble, sep=" ")
+ assert_array_equal(b[0], 1)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_longdouble.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_longdouble.pyc
new file mode 100644
index 0000000..5c804d4
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_longdouble.pyc differ
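
The long double tests hinge on repr() printing enough digits for a value one eps above 1.0 to survive a string round-trip. A sketch of that round-trip, which only holds on platforms where the skipif guards above would not fire (i.e. where an accurate strtold_l is available):

    import numpy as np

    eps = np.finfo(np.longdouble).eps
    o = np.longdouble(1) + eps
    # repr(o) must carry enough digits that parsing recovers o exactly
    assert np.longdouble(repr(o)) == o
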
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_machar.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_machar.py
new file mode 100644
index 0000000..ab8800c
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_machar.py
@@ -0,0 +1,32 @@
+"""
+Test machar. Given recent changes to hardcode type data, we might want to get
+rid of both MachAr and this test at some point.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import pytest
+
+from numpy.core.machar import MachAr
+import numpy.core.numerictypes as ntypes
+from numpy import errstate, array
+
+
+class TestMachAr(object):
+ def _run_machar_highprec(self):
+ # Instantiate MachAr instance with high enough precision to cause
+ # underflow
+ try:
+ hiprec = ntypes.float96
+ MachAr(lambda v:array([v], hiprec))
+        except AttributeError:
+            # No ntypes.float96 available on this platform; skip, as the
+            # old FIXME here asked for.
+            pytest.skip("no ntypes.float96 available on this platform")
+
+    def test_underflow(self):
+ # Regression test for #759:
+ # instantiating MachAr for dtype = np.float96 raises spurious warning.
+ with errstate(all='raise'):
+ try:
+ self._run_machar_highprec()
+ except FloatingPointError as e:
+ msg = "Caught %s exception, should not have been raised." % e
+ raise AssertionError(msg)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_machar.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_machar.pyc
new file mode 100644
index 0000000..47bcff3
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_machar.pyc differ
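
MachAr probes floating-point parameters (eps, precision, underflow limits) empirically at runtime, which is why instantiating it at high precision can trip underflow warnings; np.finfo reports the same data from precomputed tables. A small comparison sketch, assuming the default double-precision probe:

    import numpy as np
    from numpy.core.machar import MachAr

    m = MachAr()  # probes Python floats, i.e. double precision
    print(m.eps, np.finfo(np.float64).eps)  # typically both ~2.22e-16
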
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_mem_overlap.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_mem_overlap.py
new file mode 100644
index 0000000..3c8e0e7
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_mem_overlap.py
@@ -0,0 +1,950 @@
+from __future__ import division, absolute_import, print_function
+
+import sys
+import itertools
+import pytest
+
+import numpy as np
+from numpy.core._multiarray_tests import solve_diophantine, internal_overlap
+from numpy.core import _umath_tests
+from numpy.lib.stride_tricks import as_strided
+from numpy.compat import long
+from numpy.testing import (
+ assert_, assert_raises, assert_equal, assert_array_equal
+ )
+
+if sys.version_info[0] >= 3:
+ xrange = range
+
+
+ndims = 2
+size = 10
+shape = tuple([size] * ndims)
+
+MAY_SHARE_BOUNDS = 0
+MAY_SHARE_EXACT = -1
+
+
+def _indices_for_nelems(nelems):
+ """Returns slices of length nelems, from start onwards, in direction sign."""
+
+ if nelems == 0:
+ return [size // 2] # int index
+
+ res = []
+ for step in (1, 2):
+ for sign in (-1, 1):
+ start = size // 2 - nelems * step * sign // 2
+ stop = start + nelems * step * sign
+ res.append(slice(start, stop, step * sign))
+
+ return res
+
+
+def _indices_for_axis():
+ """Returns (src, dst) pairs of indices."""
+
+ res = []
+ for nelems in (0, 2, 3):
+ ind = _indices_for_nelems(nelems)
+
+ # no itertools.product available in Py2.4
+ res.extend([(a, b) for a in ind for b in ind]) # all assignments of size "nelems"
+
+ return res
+
+
+def _indices(ndims):
+ """Returns ((axis0_src, axis0_dst), (axis1_src, axis1_dst), ... ) index pairs."""
+
+ ind = _indices_for_axis()
+
+ # no itertools.product available in Py2.4
+
+ res = [[]]
+ for i in range(ndims):
+ newres = []
+ for elem in ind:
+ for others in res:
+ newres.append([elem] + others)
+ res = newres
+
+ return res
+
+
+def _check_assignment(srcidx, dstidx):
+ """Check assignment arr[dstidx] = arr[srcidx] works."""
+
+ arr = np.arange(np.product(shape)).reshape(shape)
+
+ cpy = arr.copy()
+
+ cpy[dstidx] = arr[srcidx]
+ arr[dstidx] = arr[srcidx]
+
+ assert_(np.all(arr == cpy),
+ 'assigning arr[%s] = arr[%s]' % (dstidx, srcidx))
+
+
+def test_overlapping_assignments():
+ # Test automatically generated assignments which overlap in memory.
+
+ inds = _indices(ndims)
+
+ for ind in inds:
+ srcidx = tuple([a[0] for a in ind])
+ dstidx = tuple([a[1] for a in ind])
+
+ _check_assignment(srcidx, dstidx)
+
+
+@pytest.mark.slow
+def test_diophantine_fuzz():
+ # Fuzz test the diophantine solver
+ rng = np.random.RandomState(1234)
+
+ max_int = np.iinfo(np.intp).max
+
+ for ndim in range(10):
+ feasible_count = 0
+ infeasible_count = 0
+
+ min_count = 500//(ndim + 1)
+
+ while min(feasible_count, infeasible_count) < min_count:
+ # Ensure big and small integer problems
+ A_max = 1 + rng.randint(0, 11, dtype=np.intp)**6
+ U_max = rng.randint(0, 11, dtype=np.intp)**6
+
+ A_max = min(max_int, A_max)
+ U_max = min(max_int-1, U_max)
+
+ A = tuple(int(rng.randint(1, A_max+1, dtype=np.intp))
+ for j in range(ndim))
+ U = tuple(int(rng.randint(0, U_max+2, dtype=np.intp))
+ for j in range(ndim))
+
+ b_ub = min(max_int-2, sum(a*ub for a, ub in zip(A, U)))
+ b = rng.randint(-1, b_ub+2, dtype=np.intp)
+
+ if ndim == 0 and feasible_count < min_count:
+ b = 0
+
+ X = solve_diophantine(A, U, b)
+
+ if X is None:
+ # Check the simplified decision problem agrees
+ X_simplified = solve_diophantine(A, U, b, simplify=1)
+ assert_(X_simplified is None, (A, U, b, X_simplified))
+
+ # Check no solution exists (provided the problem is
+ # small enough so that brute force checking doesn't
+ # take too long)
+ try:
+ ranges = tuple(xrange(0, a*ub+1, a) for a, ub in zip(A, U))
+ except OverflowError:
+ # xrange on 32-bit Python 2 may overflow
+ continue
+
+ size = 1
+ for r in ranges:
+ size *= len(r)
+ if size < 100000:
+ assert_(not any(sum(w) == b for w in itertools.product(*ranges)))
+ infeasible_count += 1
+ else:
+ # Check the simplified decision problem agrees
+ X_simplified = solve_diophantine(A, U, b, simplify=1)
+ assert_(X_simplified is not None, (A, U, b, X_simplified))
+
+ # Check validity
+ assert_(sum(a*x for a, x in zip(A, X)) == b)
+ assert_(all(0 <= x <= ub for x, ub in zip(X, U)))
+ feasible_count += 1
+
+
+def test_diophantine_overflow():
+ # Smoke test integer overflow detection
+ max_intp = np.iinfo(np.intp).max
+ max_int64 = np.iinfo(np.int64).max
+
+ if max_int64 <= max_intp:
+ # Check that the algorithm works internally in 128-bit;
+ # solving this problem requires large intermediate numbers
+ A = (max_int64//2, max_int64//2 - 10)
+ U = (max_int64//2, max_int64//2 - 10)
+ b = 2*(max_int64//2) - 10
+
+ assert_equal(solve_diophantine(A, U, b), (1, 1))
+
+
+def check_may_share_memory_exact(a, b):
+ got = np.may_share_memory(a, b, max_work=MAY_SHARE_EXACT)
+
+ assert_equal(np.may_share_memory(a, b),
+ np.may_share_memory(a, b, max_work=MAY_SHARE_BOUNDS))
+
+ a.fill(0)
+ b.fill(0)
+ a.fill(1)
+ exact = b.any()
+
+ err_msg = ""
+ if got != exact:
+ err_msg = " " + "\n ".join([
+ "base_a - base_b = %r" % (a.__array_interface__['data'][0] - b.__array_interface__['data'][0],),
+ "shape_a = %r" % (a.shape,),
+ "shape_b = %r" % (b.shape,),
+ "strides_a = %r" % (a.strides,),
+ "strides_b = %r" % (b.strides,),
+ "size_a = %r" % (a.size,),
+ "size_b = %r" % (b.size,)
+ ])
+
+ assert_equal(got, exact, err_msg=err_msg)
+
+
+def test_may_share_memory_manual():
+ # Manual test cases for may_share_memory
+
+ # Base arrays
+ xs0 = [
+ np.zeros([13, 21, 23, 22], dtype=np.int8),
+ np.zeros([13, 21, 23*2, 22], dtype=np.int8)[:,:,::2,:]
+ ]
+
+ # Generate all negative stride combinations
+ xs = []
+ for x in xs0:
+ for ss in itertools.product(*(([slice(None), slice(None, None, -1)],)*4)):
+ xp = x[ss]
+ xs.append(xp)
+
+ for x in xs:
+ # The default is a simple extent check
+ assert_(np.may_share_memory(x[:,0,:], x[:,1,:]))
+ assert_(np.may_share_memory(x[:,0,:], x[:,1,:], max_work=None))
+
+ # Exact checks
+ check_may_share_memory_exact(x[:,0,:], x[:,1,:])
+ check_may_share_memory_exact(x[:,::7], x[:,3::3])
+
+ try:
+ xp = x.ravel()
+ if xp.flags.owndata:
+ continue
+ xp = xp.view(np.int16)
+ except ValueError:
+ continue
+
+ # 0-size arrays cannot overlap
+ check_may_share_memory_exact(x.ravel()[6:6],
+ xp.reshape(13, 21, 23, 11)[:,::7])
+
+ # Test itemsize is dealt with
+ check_may_share_memory_exact(x[:,::7],
+ xp.reshape(13, 21, 23, 11))
+ check_may_share_memory_exact(x[:,::7],
+ xp.reshape(13, 21, 23, 11)[:,3::3])
+ check_may_share_memory_exact(x.ravel()[6:7],
+ xp.reshape(13, 21, 23, 11)[:,::7])
+
+ # Check unit size
+ x = np.zeros([1], dtype=np.int8)
+ check_may_share_memory_exact(x, x)
+ check_may_share_memory_exact(x, x.copy())
+
+
+def iter_random_view_pairs(x, same_steps=True, equal_size=False):
+ rng = np.random.RandomState(1234)
+
+ if equal_size and same_steps:
+ raise ValueError()
+
+ def random_slice(n, step):
+ start = rng.randint(0, n+1, dtype=np.intp)
+ stop = rng.randint(start, n+1, dtype=np.intp)
+ if rng.randint(0, 2, dtype=np.intp) == 0:
+ stop, start = start, stop
+ step *= -1
+ return slice(start, stop, step)
+
+ def random_slice_fixed_size(n, step, size):
+ start = rng.randint(0, n+1 - size*step)
+ stop = start + (size-1)*step + 1
+ if rng.randint(0, 2) == 0:
+ stop, start = start-1, stop-1
+ if stop < 0:
+ stop = None
+ step *= -1
+ return slice(start, stop, step)
+
+ # First a few regular views
+ yield x, x
+ for j in range(1, 7, 3):
+ yield x[j:], x[:-j]
+ yield x[...,j:], x[...,:-j]
+
+ # An array with zero stride internal overlap
+ strides = list(x.strides)
+ strides[0] = 0
+ xp = as_strided(x, shape=x.shape, strides=strides)
+ yield x, xp
+ yield xp, xp
+
+ # An array with non-zero stride internal overlap
+ strides = list(x.strides)
+ if strides[0] > 1:
+ strides[0] = 1
+ xp = as_strided(x, shape=x.shape, strides=strides)
+ yield x, xp
+ yield xp, xp
+
+ # Then discontiguous views
+ while True:
+ steps = tuple(rng.randint(1, 11, dtype=np.intp)
+ if rng.randint(0, 5, dtype=np.intp) == 0 else 1
+ for j in range(x.ndim))
+ s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps))
+
+ t1 = np.arange(x.ndim)
+ rng.shuffle(t1)
+
+ if equal_size:
+ t2 = t1
+ else:
+ t2 = np.arange(x.ndim)
+ rng.shuffle(t2)
+
+ a = x[s1]
+
+ if equal_size:
+ if a.size == 0:
+ continue
+
+ steps2 = tuple(rng.randint(1, max(2, p//(1+pa)))
+ if rng.randint(0, 5) == 0 else 1
+ for p, s, pa in zip(x.shape, s1, a.shape))
+ s2 = tuple(random_slice_fixed_size(p, s, pa)
+ for p, s, pa in zip(x.shape, steps2, a.shape))
+ elif same_steps:
+ steps2 = steps
+ else:
+ steps2 = tuple(rng.randint(1, 11, dtype=np.intp)
+ if rng.randint(0, 5, dtype=np.intp) == 0 else 1
+ for j in range(x.ndim))
+
+ if not equal_size:
+ s2 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps2))
+
+ a = a.transpose(t1)
+ b = x[s2].transpose(t2)
+
+ yield a, b
+
+
+def check_may_share_memory_easy_fuzz(get_max_work, same_steps, min_count):
+ # Check that overlap problems with common strides are solved with
+ # little work.
+ x = np.zeros([17,34,71,97], dtype=np.int16)
+
+ feasible = 0
+ infeasible = 0
+
+ pair_iter = iter_random_view_pairs(x, same_steps)
+
+ while min(feasible, infeasible) < min_count:
+ a, b = next(pair_iter)
+
+ bounds_overlap = np.may_share_memory(a, b)
+ may_share_answer = np.may_share_memory(a, b)
+ easy_answer = np.may_share_memory(a, b, max_work=get_max_work(a, b))
+ exact_answer = np.may_share_memory(a, b, max_work=MAY_SHARE_EXACT)
+
+ if easy_answer != exact_answer:
+ # assert_equal is slow...
+ assert_equal(easy_answer, exact_answer)
+
+ if may_share_answer != bounds_overlap:
+ assert_equal(may_share_answer, bounds_overlap)
+
+ if bounds_overlap:
+ if exact_answer:
+ feasible += 1
+ else:
+ infeasible += 1
+
+
+@pytest.mark.slow
+def test_may_share_memory_easy_fuzz():
+ # Check that overlap problems with common strides are always
+ # solved with little work.
+
+ check_may_share_memory_easy_fuzz(get_max_work=lambda a, b: 1,
+ same_steps=True,
+ min_count=2000)
+
+
+@pytest.mark.slow
+def test_may_share_memory_harder_fuzz():
+ # Overlap problems with not necessarily common strides take more
+ # work.
+ #
+    # The work bound below can't be reduced much. Harder problems may
+    # exist but go undetected here, as the set of problems
+    # comes from an RNG.
+
+ check_may_share_memory_easy_fuzz(get_max_work=lambda a, b: max(a.size, b.size)//2,
+ same_steps=False,
+ min_count=2000)
+
+
+def test_shares_memory_api():
+ x = np.zeros([4, 5, 6], dtype=np.int8)
+
+ assert_equal(np.shares_memory(x, x), True)
+ assert_equal(np.shares_memory(x, x.copy()), False)
+
+ a = x[:,::2,::3]
+ b = x[:,::3,::2]
+ assert_equal(np.shares_memory(a, b), True)
+ assert_equal(np.shares_memory(a, b, max_work=None), True)
+ assert_raises(np.TooHardError, np.shares_memory, a, b, max_work=1)
+ assert_raises(np.TooHardError, np.shares_memory, a, b, max_work=long(1))
+
+
+def test_may_share_memory_bad_max_work():
+ x = np.zeros([1])
+ assert_raises(OverflowError, np.may_share_memory, x, x, max_work=10**100)
+ assert_raises(OverflowError, np.shares_memory, x, x, max_work=10**100)
+
+
+def test_internal_overlap_diophantine():
+ def check(A, U, exists=None):
+ X = solve_diophantine(A, U, 0, require_ub_nontrivial=1)
+
+ if exists is None:
+ exists = (X is not None)
+
+ if X is not None:
+ assert_(sum(a*x for a, x in zip(A, X)) == sum(a*u//2 for a, u in zip(A, U)))
+ assert_(all(0 <= x <= u for x, u in zip(X, U)))
+ assert_(any(x != u//2 for x, u in zip(X, U)))
+
+ if exists:
+ assert_(X is not None, repr(X))
+ else:
+ assert_(X is None, repr(X))
+
+ # Smoke tests
+ check((3, 2), (2*2, 3*2), exists=True)
+ check((3*2, 2), (15*2, (3-1)*2), exists=False)
+
+
+def test_internal_overlap_slices():
+ # Slicing an array never generates internal overlap
+
+ x = np.zeros([17,34,71,97], dtype=np.int16)
+
+ rng = np.random.RandomState(1234)
+
+ def random_slice(n, step):
+ start = rng.randint(0, n+1, dtype=np.intp)
+ stop = rng.randint(start, n+1, dtype=np.intp)
+ if rng.randint(0, 2, dtype=np.intp) == 0:
+ stop, start = start, stop
+ step *= -1
+ return slice(start, stop, step)
+
+ cases = 0
+ min_count = 5000
+
+ while cases < min_count:
+ steps = tuple(rng.randint(1, 11, dtype=np.intp)
+ if rng.randint(0, 5, dtype=np.intp) == 0 else 1
+ for j in range(x.ndim))
+ t1 = np.arange(x.ndim)
+ rng.shuffle(t1)
+ s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps))
+ a = x[s1].transpose(t1)
+
+ assert_(not internal_overlap(a))
+ cases += 1
+
+
+def check_internal_overlap(a, manual_expected=None):
+ got = internal_overlap(a)
+
+ # Brute-force check
+ m = set()
+ ranges = tuple(xrange(n) for n in a.shape)
+ for v in itertools.product(*ranges):
+ offset = sum(s*w for s, w in zip(a.strides, v))
+ if offset in m:
+ expected = True
+ break
+ else:
+ m.add(offset)
+ else:
+ expected = False
+
+ # Compare
+ if got != expected:
+ assert_equal(got, expected, err_msg=repr((a.strides, a.shape)))
+ if manual_expected is not None and expected != manual_expected:
+ assert_equal(expected, manual_expected)
+ return got
+
+
+def test_internal_overlap_manual():
+ # Stride tricks can construct arrays with internal overlap
+
+    # We don't care about memory bounds; the array is never
+    # actually read from or written to.
+ x = np.arange(1).astype(np.int8)
+
+ # Check low-dimensional special cases
+
+ check_internal_overlap(x, False) # 1-dim
+ check_internal_overlap(x.reshape([]), False) # 0-dim
+
+ a = as_strided(x, strides=(3, 4), shape=(4, 4))
+ check_internal_overlap(a, False)
+
+ a = as_strided(x, strides=(3, 4), shape=(5, 4))
+ check_internal_overlap(a, True)
+
+ a = as_strided(x, strides=(0,), shape=(0,))
+ check_internal_overlap(a, False)
+
+ a = as_strided(x, strides=(0,), shape=(1,))
+ check_internal_overlap(a, False)
+
+ a = as_strided(x, strides=(0,), shape=(2,))
+ check_internal_overlap(a, True)
+
+ a = as_strided(x, strides=(0, -9993), shape=(87, 22))
+ check_internal_overlap(a, True)
+
+ a = as_strided(x, strides=(0, -9993), shape=(1, 22))
+ check_internal_overlap(a, False)
+
+ a = as_strided(x, strides=(0, -9993), shape=(0, 22))
+ check_internal_overlap(a, False)
+
+
+def test_internal_overlap_fuzz():
+ # Fuzz check; the brute-force check is fairly slow
+
+ x = np.arange(1).astype(np.int8)
+
+ overlap = 0
+ no_overlap = 0
+ min_count = 100
+
+ rng = np.random.RandomState(1234)
+
+ while min(overlap, no_overlap) < min_count:
+ ndim = rng.randint(1, 4, dtype=np.intp)
+
+ strides = tuple(rng.randint(-1000, 1000, dtype=np.intp)
+ for j in range(ndim))
+ shape = tuple(rng.randint(1, 30, dtype=np.intp)
+ for j in range(ndim))
+
+ a = as_strided(x, strides=strides, shape=shape)
+ result = check_internal_overlap(a)
+
+ if result:
+ overlap += 1
+ else:
+ no_overlap += 1
+
+
+def test_non_ndarray_inputs():
+ # Regression check for gh-5604
+
+ class MyArray(object):
+ def __init__(self, data):
+ self.data = data
+
+ @property
+ def __array_interface__(self):
+ return self.data.__array_interface__
+
+ class MyArray2(object):
+ def __init__(self, data):
+ self.data = data
+
+ def __array__(self):
+ return self.data
+
+ for cls in [MyArray, MyArray2]:
+ x = np.arange(5)
+
+ assert_(np.may_share_memory(cls(x[::2]), x[1::2]))
+ assert_(not np.shares_memory(cls(x[::2]), x[1::2]))
+
+ assert_(np.shares_memory(cls(x[1::3]), x[::2]))
+ assert_(np.may_share_memory(cls(x[1::3]), x[::2]))
+
+
+def view_element_first_byte(x):
+ """Construct an array viewing the first byte of each element of `x`"""
+ from numpy.lib.stride_tricks import DummyArray
+ interface = dict(x.__array_interface__)
+ interface['typestr'] = '|b1'
+ interface['descr'] = [('', '|b1')]
+ return np.asarray(DummyArray(interface, x))
+
+
+def assert_copy_equivalent(operation, args, out, **kwargs):
+ """
+ Check that operation(*args, out=out) produces results
+ equivalent to out[...] = operation(*args, out=out.copy())
+ """
+
+ kwargs['out'] = out
+ kwargs2 = dict(kwargs)
+ kwargs2['out'] = out.copy()
+
+ out_orig = out.copy()
+ out[...] = operation(*args, **kwargs2)
+ expected = out.copy()
+ out[...] = out_orig
+
+ got = operation(*args, **kwargs).copy()
+
+ if (got != expected).any():
+ assert_equal(got, expected)
+
+
+class TestUFunc(object):
+ """
+ Test ufunc call memory overlap handling
+ """
+
+ def check_unary_fuzz(self, operation, get_out_axis_size, dtype=np.int16,
+ count=5000):
+ shapes = [7, 13, 8, 21, 29, 32]
+
+ rng = np.random.RandomState(1234)
+
+ for ndim in range(1, 6):
+ x = rng.randint(0, 2**16, size=shapes[:ndim]).astype(dtype)
+
+ it = iter_random_view_pairs(x, same_steps=False, equal_size=True)
+
+ min_count = count // (ndim + 1)**2
+
+ overlapping = 0
+ while overlapping < min_count:
+ a, b = next(it)
+
+ a_orig = a.copy()
+ b_orig = b.copy()
+
+ if get_out_axis_size is None:
+ assert_copy_equivalent(operation, [a], out=b)
+
+ if np.shares_memory(a, b):
+ overlapping += 1
+ else:
+ for axis in itertools.chain(range(ndim), [None]):
+ a[...] = a_orig
+ b[...] = b_orig
+
+ # Determine size for reduction axis (None if scalar)
+ outsize, scalarize = get_out_axis_size(a, b, axis)
+ if outsize == 'skip':
+ continue
+
+ # Slice b to get an output array of the correct size
+ sl = [slice(None)] * ndim
+ if axis is None:
+ if outsize is None:
+ sl = [slice(0, 1)] + [0]*(ndim - 1)
+ else:
+ sl = [slice(0, outsize)] + [0]*(ndim - 1)
+ else:
+ if outsize is None:
+ k = b.shape[axis]//2
+ if ndim == 1:
+ sl[axis] = slice(k, k + 1)
+ else:
+ sl[axis] = k
+ else:
+ assert b.shape[axis] >= outsize
+ sl[axis] = slice(0, outsize)
+ b_out = b[tuple(sl)]
+
+ if scalarize:
+ b_out = b_out.reshape([])
+
+ if np.shares_memory(a, b_out):
+ overlapping += 1
+
+ # Check result
+ assert_copy_equivalent(operation, [a], out=b_out, axis=axis)
+
+ @pytest.mark.slow
+ def test_unary_ufunc_call_fuzz(self):
+ self.check_unary_fuzz(np.invert, None, np.int16)
+
+ def test_binary_ufunc_accumulate_fuzz(self):
+ def get_out_axis_size(a, b, axis):
+ if axis is None:
+ if a.ndim == 1:
+ return a.size, False
+ else:
+ return 'skip', False # accumulate doesn't support this
+ else:
+ return a.shape[axis], False
+
+ self.check_unary_fuzz(np.add.accumulate, get_out_axis_size,
+ dtype=np.int16, count=500)
+
+ def test_binary_ufunc_reduce_fuzz(self):
+ def get_out_axis_size(a, b, axis):
+ return None, (axis is None or a.ndim == 1)
+
+ self.check_unary_fuzz(np.add.reduce, get_out_axis_size,
+ dtype=np.int16, count=500)
+
+ def test_binary_ufunc_reduceat_fuzz(self):
+ def get_out_axis_size(a, b, axis):
+ if axis is None:
+ if a.ndim == 1:
+ return a.size, False
+ else:
+ return 'skip', False # reduceat doesn't support this
+ else:
+ return a.shape[axis], False
+
+ def do_reduceat(a, out, axis):
+ if axis is None:
+ size = len(a)
+ step = size//len(out)
+ else:
+ size = a.shape[axis]
+ step = a.shape[axis] // out.shape[axis]
+ idx = np.arange(0, size, step)
+ return np.add.reduceat(a, idx, out=out, axis=axis)
+
+ self.check_unary_fuzz(do_reduceat, get_out_axis_size,
+ dtype=np.int16, count=500)
+
+ def test_binary_ufunc_reduceat_manual(self):
+ def check(ufunc, a, ind, out):
+ c1 = ufunc.reduceat(a.copy(), ind.copy(), out=out.copy())
+ c2 = ufunc.reduceat(a, ind, out=out)
+ assert_array_equal(c1, c2)
+
+ # Exactly same input/output arrays
+ a = np.arange(10000, dtype=np.int16)
+ check(np.add, a, a[::-1].copy(), a)
+
+ # Overlap with index
+ a = np.arange(10000, dtype=np.int16)
+ check(np.add, a, a[::-1], a)
+
+ def test_unary_gufunc_fuzz(self):
+ shapes = [7, 13, 8, 21, 29, 32]
+ gufunc = _umath_tests.euclidean_pdist
+
+ rng = np.random.RandomState(1234)
+
+ for ndim in range(2, 6):
+ x = rng.rand(*shapes[:ndim])
+
+ it = iter_random_view_pairs(x, same_steps=False, equal_size=True)
+
+ min_count = 500 // (ndim + 1)**2
+
+ overlapping = 0
+ while overlapping < min_count:
+ a, b = next(it)
+
+ if min(a.shape[-2:]) < 2 or min(b.shape[-2:]) < 2 or a.shape[-1] < 2:
+ continue
+
+                # Ensure the shapes are such that euclidean_pdist is happy
+ if b.shape[-1] > b.shape[-2]:
+ b = b[...,0,:]
+ else:
+ b = b[...,:,0]
+
+ n = a.shape[-2]
+ p = n * (n - 1) // 2
+ if p <= b.shape[-1] and p > 0:
+ b = b[...,:p]
+ else:
+ n = max(2, int(np.sqrt(b.shape[-1]))//2)
+ p = n * (n - 1) // 2
+ a = a[...,:n,:]
+ b = b[...,:p]
+
+ # Call
+ if np.shares_memory(a, b):
+ overlapping += 1
+
+ with np.errstate(over='ignore', invalid='ignore'):
+ assert_copy_equivalent(gufunc, [a], out=b)
+
+ def test_ufunc_at_manual(self):
+ def check(ufunc, a, ind, b=None):
+ a0 = a.copy()
+ if b is None:
+ ufunc.at(a0, ind.copy())
+ c1 = a0.copy()
+ ufunc.at(a, ind)
+ c2 = a.copy()
+ else:
+ ufunc.at(a0, ind.copy(), b.copy())
+ c1 = a0.copy()
+ ufunc.at(a, ind, b)
+ c2 = a.copy()
+ assert_array_equal(c1, c2)
+
+ # Overlap with index
+ a = np.arange(10000, dtype=np.int16)
+ check(np.invert, a[::-1], a)
+
+ # Overlap with second data array
+ a = np.arange(100, dtype=np.int16)
+ ind = np.arange(0, 100, 2, dtype=np.int16)
+ check(np.add, a, ind, a[25:75])
+
+ def test_unary_ufunc_1d_manual(self):
+ # Exercise branches in PyArray_EQUIVALENTLY_ITERABLE
+
+ def check(a, b):
+ a_orig = a.copy()
+ b_orig = b.copy()
+
+ b0 = b.copy()
+ c1 = ufunc(a, out=b0)
+ c2 = ufunc(a, out=b)
+ assert_array_equal(c1, c2)
+
+ # Trigger "fancy ufunc loop" code path
+ mask = view_element_first_byte(b).view(np.bool_)
+
+ a[...] = a_orig
+ b[...] = b_orig
+ c1 = ufunc(a, out=b.copy(), where=mask.copy()).copy()
+
+ a[...] = a_orig
+ b[...] = b_orig
+ c2 = ufunc(a, out=b, where=mask.copy()).copy()
+
+ # Also, mask overlapping with output
+ a[...] = a_orig
+ b[...] = b_orig
+ c3 = ufunc(a, out=b, where=mask).copy()
+
+ assert_array_equal(c1, c2)
+ assert_array_equal(c1, c3)
+
+ dtypes = [np.int8, np.int16, np.int32, np.int64, np.float32,
+ np.float64, np.complex64, np.complex128]
+ dtypes = [np.dtype(x) for x in dtypes]
+
+ for dtype in dtypes:
+ if np.issubdtype(dtype, np.integer):
+ ufunc = np.invert
+ else:
+ ufunc = np.reciprocal
+
+ n = 1000
+ k = 10
+ indices = [
+ np.index_exp[:n],
+ np.index_exp[k:k+n],
+ np.index_exp[n-1::-1],
+ np.index_exp[k+n-1:k-1:-1],
+ np.index_exp[:2*n:2],
+ np.index_exp[k:k+2*n:2],
+ np.index_exp[2*n-1::-2],
+ np.index_exp[k+2*n-1:k-1:-2],
+ ]
+
+ for xi, yi in itertools.product(indices, indices):
+ v = np.arange(1, 1 + n*2 + k, dtype=dtype)
+ x = v[xi]
+ y = v[yi]
+
+ with np.errstate(all='ignore'):
+ check(x, y)
+
+ # Scalar cases
+ check(x[:1], y)
+ check(x[-1:], y)
+ check(x[:1].reshape([]), y)
+ check(x[-1:].reshape([]), y)
+
+ def test_unary_ufunc_where_same(self):
+ # Check behavior at wheremask overlap
+ ufunc = np.invert
+
+ def check(a, out, mask):
+ c1 = ufunc(a, out=out.copy(), where=mask.copy())
+ c2 = ufunc(a, out=out, where=mask)
+ assert_array_equal(c1, c2)
+
+ # Check behavior with same input and output arrays
+ x = np.arange(100).astype(np.bool_)
+ check(x, x, x)
+ check(x, x.copy(), x)
+ check(x, x, x.copy())
+
+ @pytest.mark.slow
+ def test_binary_ufunc_1d_manual(self):
+ ufunc = np.add
+
+ def check(a, b, c):
+ c0 = c.copy()
+ c1 = ufunc(a, b, out=c0)
+ c2 = ufunc(a, b, out=c)
+ assert_array_equal(c1, c2)
+
+ for dtype in [np.int8, np.int16, np.int32, np.int64,
+ np.float32, np.float64, np.complex64, np.complex128]:
+ # Check different data dependency orders
+
+ n = 1000
+ k = 10
+
+ indices = []
+ for p in [1, 2]:
+ indices.extend([
+ np.index_exp[:p*n:p],
+ np.index_exp[k:k+p*n:p],
+ np.index_exp[p*n-1::-p],
+ np.index_exp[k+p*n-1:k-1:-p],
+ ])
+
+ for x, y, z in itertools.product(indices, indices, indices):
+ v = np.arange(6*n).astype(dtype)
+ x = v[x]
+ y = v[y]
+ z = v[z]
+
+ check(x, y, z)
+
+ # Scalar cases
+ check(x[:1], y, z)
+ check(x[-1:], y, z)
+ check(x[:1].reshape([]), y, z)
+ check(x[-1:].reshape([]), y, z)
+ check(x, y[:1], z)
+ check(x, y[-1:], z)
+ check(x, y[:1].reshape([]), z)
+ check(x, y[-1:].reshape([]), z)
+
+ def test_inplace_op_simple_manual(self):
+ rng = np.random.RandomState(1234)
+ x = rng.rand(200, 200) # bigger than bufsize
+
+ x += x.T
+ assert_array_equal(x - x.T, 0)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_mem_overlap.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_mem_overlap.pyc
new file mode 100644
index 0000000..4c41a10
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_mem_overlap.pyc differ
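
The overlap tests exercise the difference between np.may_share_memory (a cheap bounds check) and np.shares_memory (an exact check, which internally reduces to a bounded Diophantine problem whose cost max_work caps). The API usage, condensed from test_shares_memory_api above:

    import numpy as np

    x = np.zeros((4, 5, 6), dtype=np.int8)
    a, b = x[:, ::2, ::3], x[:, ::3, ::2]
    assert np.may_share_memory(a, b)  # bounds overlap
    assert np.shares_memory(a, b)     # exact check: they really overlap
    try:
        np.shares_memory(a, b, max_work=1)  # refuse an expensive search
    except np.TooHardError:
        pass
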
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_memmap.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_memmap.py
new file mode 100644
index 0000000..990d0ae
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_memmap.py
@@ -0,0 +1,206 @@
+from __future__ import division, absolute_import, print_function
+
+import sys
+import os
+import shutil
+import mmap
+import pytest
+from tempfile import NamedTemporaryFile, TemporaryFile, mktemp, mkdtemp
+
+from numpy import (
+ memmap, sum, average, product, ndarray, isscalar, add, subtract, multiply)
+from numpy.compat import Path
+
+from numpy import arange, allclose, asarray
+from numpy.testing import (
+ assert_, assert_equal, assert_array_equal, suppress_warnings
+ )
+
+class TestMemmap(object):
+ def setup(self):
+ self.tmpfp = NamedTemporaryFile(prefix='mmap')
+ self.tempdir = mkdtemp()
+ self.shape = (3, 4)
+ self.dtype = 'float32'
+ self.data = arange(12, dtype=self.dtype)
+ self.data.resize(self.shape)
+
+ def teardown(self):
+ self.tmpfp.close()
+ shutil.rmtree(self.tempdir)
+
+ def test_roundtrip(self):
+ # Write data to file
+ fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
+ shape=self.shape)
+ fp[:] = self.data[:]
+ del fp # Test __del__ machinery, which handles cleanup
+
+ # Read data back from file
+ newfp = memmap(self.tmpfp, dtype=self.dtype, mode='r',
+ shape=self.shape)
+ assert_(allclose(self.data, newfp))
+ assert_array_equal(self.data, newfp)
+ assert_equal(newfp.flags.writeable, False)
+
+ def test_open_with_filename(self):
+ tmpname = mktemp('', 'mmap', dir=self.tempdir)
+ fp = memmap(tmpname, dtype=self.dtype, mode='w+',
+ shape=self.shape)
+ fp[:] = self.data[:]
+ del fp
+
+ def test_unnamed_file(self):
+ with TemporaryFile() as f:
+ fp = memmap(f, dtype=self.dtype, shape=self.shape)
+ del fp
+
+ def test_attributes(self):
+ offset = 1
+ mode = "w+"
+ fp = memmap(self.tmpfp, dtype=self.dtype, mode=mode,
+ shape=self.shape, offset=offset)
+ assert_equal(offset, fp.offset)
+ assert_equal(mode, fp.mode)
+ del fp
+
+ def test_filename(self):
+ tmpname = mktemp('', 'mmap', dir=self.tempdir)
+ fp = memmap(tmpname, dtype=self.dtype, mode='w+',
+ shape=self.shape)
+ abspath = os.path.abspath(tmpname)
+ fp[:] = self.data[:]
+ assert_equal(abspath, fp.filename)
+ b = fp[:1]
+ assert_equal(abspath, b.filename)
+ del b
+ del fp
+
+ @pytest.mark.skipif(Path is None, reason="No pathlib.Path")
+ def test_path(self):
+ tmpname = mktemp('', 'mmap', dir=self.tempdir)
+ fp = memmap(Path(tmpname), dtype=self.dtype, mode='w+',
+ shape=self.shape)
+ # os.path.realpath does not resolve symlinks on Windows
+ # see: https://bugs.python.org/issue9949
+ # use Path.resolve, just as memmap class does internally
+ abspath = str(Path(tmpname).resolve())
+ fp[:] = self.data[:]
+ assert_equal(abspath, str(fp.filename.resolve()))
+ b = fp[:1]
+ assert_equal(abspath, str(b.filename.resolve()))
+ del b
+ del fp
+
+ def test_filename_fileobj(self):
+ fp = memmap(self.tmpfp, dtype=self.dtype, mode="w+",
+ shape=self.shape)
+ assert_equal(fp.filename, self.tmpfp.name)
+
+ @pytest.mark.skipif(sys.platform == 'gnu0',
+ reason="Known to fail on hurd")
+ def test_flush(self):
+ fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
+ shape=self.shape)
+ fp[:] = self.data[:]
+ assert_equal(fp[0], self.data[0])
+ fp.flush()
+
+ def test_del(self):
+ # Make sure a view does not delete the underlying mmap
+ fp_base = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
+ shape=self.shape)
+ fp_base[0] = 5
+ fp_view = fp_base[0:1]
+ assert_equal(fp_view[0], 5)
+ del fp_view
+ # Should still be able to access and assign values after
+ # deleting the view
+ assert_equal(fp_base[0], 5)
+ fp_base[0] = 6
+ assert_equal(fp_base[0], 6)
+
+ def test_arithmetic_drops_references(self):
+ fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
+ shape=self.shape)
+ tmp = (fp + 10)
+ if isinstance(tmp, memmap):
+ assert_(tmp._mmap is not fp._mmap)
+
+ def test_indexing_drops_references(self):
+ fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
+ shape=self.shape)
+ tmp = fp[(1, 2), (2, 3)]
+ if isinstance(tmp, memmap):
+ assert_(tmp._mmap is not fp._mmap)
+
+ def test_slicing_keeps_references(self):
+ fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
+ shape=self.shape)
+ assert_(fp[:2, :2]._mmap is fp._mmap)
+
+ def test_view(self):
+ fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
+ new1 = fp.view()
+ new2 = new1.view()
+ assert_(new1.base is fp)
+ assert_(new2.base is fp)
+ new_array = asarray(fp)
+ assert_(new_array.base is fp)
+
+ def test_ufunc_return_ndarray(self):
+ fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
+ fp[:] = self.data
+
+ with suppress_warnings() as sup:
+ sup.filter(FutureWarning, "np.average currently does not preserve")
+ for unary_op in [sum, average, product]:
+ result = unary_op(fp)
+ assert_(isscalar(result))
+ assert_(result.__class__ is self.data[0, 0].__class__)
+
+ assert_(unary_op(fp, axis=0).__class__ is ndarray)
+ assert_(unary_op(fp, axis=1).__class__ is ndarray)
+
+ for binary_op in [add, subtract, multiply]:
+ assert_(binary_op(fp, self.data).__class__ is ndarray)
+ assert_(binary_op(self.data, fp).__class__ is ndarray)
+ assert_(binary_op(fp, fp).__class__ is ndarray)
+
+ fp += 1
+ assert(fp.__class__ is memmap)
+ add(fp, 1, out=fp)
+ assert(fp.__class__ is memmap)
+
+ def test_getitem(self):
+ fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
+ fp[:] = self.data
+
+ assert_(fp[1:, :-1].__class__ is memmap)
+ # Fancy indexing returns a copy that is not memmapped
+ assert_(fp[[0, 1]].__class__ is ndarray)
+
+ def test_memmap_subclass(self):
+ class MemmapSubClass(memmap):
+ pass
+
+ fp = MemmapSubClass(self.tmpfp, dtype=self.dtype, shape=self.shape)
+ fp[:] = self.data
+
+ # We keep previous behavior for subclasses of memmap, i.e. the
+ # ufunc and __getitem__ output is never turned into a ndarray
+ assert_(sum(fp, axis=0).__class__ is MemmapSubClass)
+ assert_(sum(fp).__class__ is MemmapSubClass)
+ assert_(fp[1:, :-1].__class__ is MemmapSubClass)
+ assert(fp[[0, 1]].__class__ is MemmapSubClass)
+
+ def test_mmap_offset_greater_than_allocation_granularity(self):
+ size = 5 * mmap.ALLOCATIONGRANULARITY
+ offset = mmap.ALLOCATIONGRANULARITY + 1
+ fp = memmap(self.tmpfp, shape=size, mode='w+', offset=offset)
+ assert_(fp.offset == offset)
+
+ def test_no_shape(self):
+ self.tmpfp.write(b'a'*16)
+ mm = memmap(self.tmpfp, dtype='float64')
+ assert_equal(mm.shape, (2,))
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_memmap.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_memmap.pyc
new file mode 100644
index 0000000..833c00c
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_memmap.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_multiarray.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_multiarray.py
new file mode 100644
index 0000000..7dd35c7
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_multiarray.py
@@ -0,0 +1,8095 @@
+from __future__ import division, absolute_import, print_function
+
+try:
+ # Accessing collections abstract classes from collections
+ # has been deprecated since Python 3.3
+ import collections.abc as collections_abc
+except ImportError:
+ import collections as collections_abc
+import tempfile
+import sys
+import shutil
+import warnings
+import operator
+import io
+import itertools
+import functools
+import ctypes
+import os
+import gc
+import weakref
+import pytest
+from contextlib import contextmanager
+
+from numpy.core.numeric import pickle
+
+if sys.version_info[0] >= 3:
+ import builtins
+else:
+ import __builtin__ as builtins
+from decimal import Decimal
+
+import numpy as np
+from numpy.compat import strchar, unicode
+import numpy.core._multiarray_tests as _multiarray_tests
+from numpy.testing import (
+ assert_, assert_raises, assert_warns, assert_equal, assert_almost_equal,
+ assert_array_equal, assert_raises_regex, assert_array_almost_equal,
+ assert_allclose, IS_PYPY, HAS_REFCOUNT, assert_array_less, runstring,
+ temppath, suppress_warnings
+ )
+from numpy.core.tests._locales import CommaDecimalPointLocale
+
+# Need to test an object that does not fully implement math interface
+from datetime import timedelta, datetime
+
+
+if sys.version_info[:2] > (3, 2):
+ # In Python 3.3 the representation of empty shape, strides and sub-offsets
+ # is an empty tuple instead of None.
+ # https://docs.python.org/dev/whatsnew/3.3.html#api-changes
+ EMPTY = ()
+else:
+ EMPTY = None
+
+
+def _aligned_zeros(shape, dtype=float, order="C", align=None):
+ """
+ Allocate a new ndarray with aligned memory.
+
+ The ndarray is guaranteed *not* aligned to twice the requested alignment.
+ Eg, if align=4, guarantees it is not aligned to 8. If align=None uses
+ dtype.alignment."""
+ dtype = np.dtype(dtype)
+ if dtype == np.dtype(object):
+ # Can't do this, fall back to standard allocation (which
+ # should always be sufficiently aligned)
+ if align is not None:
+ raise ValueError("object array alignment not supported")
+ return np.zeros(shape, dtype=dtype, order=order)
+ if align is None:
+ align = dtype.alignment
+ if not hasattr(shape, '__len__'):
+ shape = (shape,)
+ size = functools.reduce(operator.mul, shape) * dtype.itemsize
+ buf = np.empty(size + 2*align + 1, np.uint8)
+
+ ptr = buf.__array_interface__['data'][0]
+ offset = ptr % align
+ if offset != 0:
+ offset = align - offset
+ if (ptr % (2*align)) == 0:
+ offset += align
+
+ # Note: slices producing 0-size arrays do not necessarily change
+ # data pointer --- so we use and allocate size+1
+ buf = buf[offset:offset+size+1][:-1]
+ data = np.ndarray(shape, dtype, buf, order=order)
+ data.fill(0)
+ return data
+
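+# A quick sanity check of the guarantee above (illustrative sketch): with
+# align=4 the returned data pointer is divisible by 4 but deliberately not
+# by 8:
+#
+#     buf = _aligned_zeros(16, dtype=np.uint8, align=4)
+#     ptr = buf.__array_interface__['data'][0]
+#     assert ptr % 4 == 0 and ptr % 8 != 0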
+
+class TestFlags(object):
+ def setup(self):
+ self.a = np.arange(10)
+
+ def test_writeable(self):
+ mydict = locals()
+ self.a.flags.writeable = False
+ assert_raises(ValueError, runstring, 'self.a[0] = 3', mydict)
+ assert_raises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
+ self.a.flags.writeable = True
+ self.a[0] = 5
+ self.a[0] = 0
+
+ def test_writeable_from_readonly(self):
+ # gh-9440 - make sure fromstring, from buffer on readonly buffers
+ # set writeable False
+ data = b'\x00' * 100
+ vals = np.frombuffer(data, 'B')
+ assert_raises(ValueError, vals.setflags, write=True)
+ types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] )
+ values = np.core.records.fromstring(data, types)
+ vals = values['vals']
+ assert_raises(ValueError, vals.setflags, write=True)
+
+ def test_writeable_from_buffer(self):
+ data = bytearray(b'\x00' * 100)
+ vals = np.frombuffer(data, 'B')
+ assert_(vals.flags.writeable)
+ vals.setflags(write=False)
+ assert_(vals.flags.writeable is False)
+ vals.setflags(write=True)
+ assert_(vals.flags.writeable)
+ types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] )
+ values = np.core.records.fromstring(data, types)
+ vals = values['vals']
+ assert_(vals.flags.writeable)
+ vals.setflags(write=False)
+ assert_(vals.flags.writeable is False)
+ vals.setflags(write=True)
+ assert_(vals.flags.writeable)
+
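+    # Contrast in brief (illustrative): frombuffer inherits mutability from
+    # its source, so a bytearray gives a writeable view while a view of
+    # immutable bytes can never be made writeable, e.g.
+    #     np.frombuffer(bytearray(4), 'B').flags.writeable   # -> True
+    #     np.frombuffer(b'\x00' * 4, 'B').flags.writeable    # -> False
+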
+ @pytest.mark.skipif(sys.version_info[0] < 3, reason="Python 2 always copies")
+ def test_writeable_pickle(self):
+ import pickle
+ # Small arrays will be copied without setting base.
+ # See condition for using PyArray_SetBaseObject in
+ # array_setstate.
+ a = np.arange(1000)
+ for v in range(pickle.HIGHEST_PROTOCOL):
+ vals = pickle.loads(pickle.dumps(a, v))
+ assert_(vals.flags.writeable)
+ assert_(isinstance(vals.base, bytes))
+
+ def test_otherflags(self):
+ assert_equal(self.a.flags.carray, True)
+ assert_equal(self.a.flags['C'], True)
+ assert_equal(self.a.flags.farray, False)
+ assert_equal(self.a.flags.behaved, True)
+ assert_equal(self.a.flags.fnc, False)
+ assert_equal(self.a.flags.forc, True)
+ assert_equal(self.a.flags.owndata, True)
+ assert_equal(self.a.flags.writeable, True)
+ assert_equal(self.a.flags.aligned, True)
+ with assert_warns(DeprecationWarning):
+ assert_equal(self.a.flags.updateifcopy, False)
+ with assert_warns(DeprecationWarning):
+ assert_equal(self.a.flags['U'], False)
+ assert_equal(self.a.flags['UPDATEIFCOPY'], False)
+ assert_equal(self.a.flags.writebackifcopy, False)
+ assert_equal(self.a.flags['X'], False)
+ assert_equal(self.a.flags['WRITEBACKIFCOPY'], False)
+
+ def test_string_align(self):
+ a = np.zeros(4, dtype=np.dtype('|S4'))
+ assert_(a.flags.aligned)
+        # sizes that are not a power of two are accessed byte-wise and thus considered aligned
+ a = np.zeros(5, dtype=np.dtype('|S4'))
+ assert_(a.flags.aligned)
+
+ def test_void_align(self):
+ a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
+ assert_(a.flags.aligned)
+
+
+class TestHash(object):
+ # see #3793
+ def test_int(self):
+ for st, ut, s in [(np.int8, np.uint8, 8),
+ (np.int16, np.uint16, 16),
+ (np.int32, np.uint32, 32),
+ (np.int64, np.uint64, 64)]:
+ for i in range(1, s):
+ assert_equal(hash(st(-2**i)), hash(-2**i),
+ err_msg="%r: -2**%d" % (st, i))
+ assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
+ err_msg="%r: 2**%d" % (st, i - 1))
+ assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
+ err_msg="%r: 2**%d - 1" % (st, i))
+
+ i = max(i - 1, 1)
+ assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
+ err_msg="%r: 2**%d" % (ut, i - 1))
+ assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
+ err_msg="%r: 2**%d - 1" % (ut, i))
+
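+# The invariant above in one line (illustrative): NumPy integer scalars must
+# hash like the Python ints they compare equal to, e.g.
+#     hash(np.int16(-7)) == hash(-7)   # -> expected True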
+
+class TestAttributes(object):
+ def setup(self):
+ self.one = np.arange(10)
+ self.two = np.arange(20).reshape(4, 5)
+ self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)
+
+ def test_attributes(self):
+ assert_equal(self.one.shape, (10,))
+ assert_equal(self.two.shape, (4, 5))
+ assert_equal(self.three.shape, (2, 5, 6))
+ self.three.shape = (10, 3, 2)
+ assert_equal(self.three.shape, (10, 3, 2))
+ self.three.shape = (2, 5, 6)
+ assert_equal(self.one.strides, (self.one.itemsize,))
+ num = self.two.itemsize
+ assert_equal(self.two.strides, (5*num, num))
+ num = self.three.itemsize
+ assert_equal(self.three.strides, (30*num, 6*num, num))
+ assert_equal(self.one.ndim, 1)
+ assert_equal(self.two.ndim, 2)
+ assert_equal(self.three.ndim, 3)
+ num = self.two.itemsize
+ assert_equal(self.two.size, 20)
+ assert_equal(self.two.nbytes, 20*num)
+ assert_equal(self.two.itemsize, self.two.dtype.itemsize)
+ assert_equal(self.two.base, np.arange(20))
+
+ def test_dtypeattr(self):
+ assert_equal(self.one.dtype, np.dtype(np.int_))
+ assert_equal(self.three.dtype, np.dtype(np.float_))
+ assert_equal(self.one.dtype.char, 'l')
+ assert_equal(self.three.dtype.char, 'd')
+ assert_(self.three.dtype.str[0] in '<>')
+ assert_equal(self.one.dtype.str[1], 'i')
+ assert_equal(self.three.dtype.str[1], 'f')
+
+ def test_int_subclassing(self):
+ # Regression test for https://github.com/numpy/numpy/pull/3526
+
+ numpy_int = np.int_(0)
+
+ if sys.version_info[0] >= 3:
+ # On Py3k int_ should not inherit from int, because it's not
+ # fixed-width anymore
+ assert_equal(isinstance(numpy_int, int), False)
+ else:
+ # Otherwise, it should inherit from int...
+ assert_equal(isinstance(numpy_int, int), True)
+
+ # ... and fast-path checks on C-API level should also work
+ from numpy.core._multiarray_tests import test_int_subclass
+ assert_equal(test_int_subclass(numpy_int), True)
+
+ def test_stridesattr(self):
+ x = self.one
+
+ def make_array(size, offset, strides):
+ return np.ndarray(size, buffer=x, dtype=int,
+ offset=offset*x.itemsize,
+ strides=strides*x.itemsize)
+
+ assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
+ assert_raises(ValueError, make_array, 4, 4, -2)
+ assert_raises(ValueError, make_array, 4, 2, -1)
+ assert_raises(ValueError, make_array, 8, 3, 1)
+ assert_equal(make_array(8, 3, 0), np.array([3]*8))
+ # Check behavior reported in gh-2503:
+ assert_raises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
+ make_array(0, 0, 10)
+
+ def test_set_stridesattr(self):
+ x = self.one
+
+ def make_array(size, offset, strides):
+ try:
+ r = np.ndarray([size], dtype=int, buffer=x,
+ offset=offset*x.itemsize)
+ except Exception as e:
+ raise RuntimeError(e)
+ r.strides = strides = strides*x.itemsize
+ return r
+
+ assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
+ assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9]))
+ assert_raises(ValueError, make_array, 4, 4, -2)
+ assert_raises(ValueError, make_array, 4, 2, -1)
+ assert_raises(RuntimeError, make_array, 8, 3, 1)
+ # Check that the true extent of the array is used.
+ # Test relies on as_strided base not exposing a buffer.
+ x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0))
+
+ def set_strides(arr, strides):
+ arr.strides = strides
+
+ assert_raises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
+
+ # Test for offset calculations:
+ x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
+ shape=(10,), strides=(-1,))
+ assert_raises(ValueError, set_strides, x[::-1], -1)
+ a = x[::-1]
+ a.strides = 1
+ a[::2].strides = 2
+
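+    # The rule being enforced (illustrative sketch): strides may only be
+    # reassigned if every addressed element stays inside the true extent of
+    # the underlying buffer; a 0-strided as_strided view therefore rejects
+    # widening, e.g. on a 64-bit build:
+    #     x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0))
+    #     x.strides = (80, 8)   # -> expected ValueError
+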
+ def test_fill(self):
+ for t in "?bhilqpBHILQPfdgFDGO":
+ x = np.empty((3, 2, 1), t)
+ y = np.empty((3, 2, 1), t)
+ x.fill(1)
+ y[...] = 1
+ assert_equal(x, y)
+
+ def test_fill_max_uint64(self):
+ x = np.empty((3, 2, 1), dtype=np.uint64)
+ y = np.empty((3, 2, 1), dtype=np.uint64)
+ value = 2**64 - 1
+ y[...] = value
+ x.fill(value)
+ assert_array_equal(x, y)
+
+ def test_fill_struct_array(self):
+ # Filling from a scalar
+ x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8')
+ x.fill(x[0])
+ assert_equal(x['f1'][1], x['f1'][0])
+ # Filling from a tuple that can be converted
+ # to a scalar
+ x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')])
+ x.fill((3.5, -2))
+ assert_array_equal(x['a'], [3.5, 3.5])
+ assert_array_equal(x['b'], [-2, -2])
+
+
+class TestArrayConstruction(object):
+ def test_array(self):
+ d = np.ones(6)
+ r = np.array([d, d])
+ assert_equal(r, np.ones((2, 6)))
+
+ d = np.ones(6)
+ tgt = np.ones((2, 6))
+ r = np.array([d, d])
+ assert_equal(r, tgt)
+ tgt[1] = 2
+ r = np.array([d, d + 1])
+ assert_equal(r, tgt)
+
+ d = np.ones(6)
+ r = np.array([[d, d]])
+ assert_equal(r, np.ones((1, 2, 6)))
+
+ d = np.ones(6)
+ r = np.array([[d, d], [d, d]])
+ assert_equal(r, np.ones((2, 2, 6)))
+
+ d = np.ones((6, 6))
+ r = np.array([d, d])
+ assert_equal(r, np.ones((2, 6, 6)))
+
+ d = np.ones((6, ))
+ r = np.array([[d, d + 1], d + 2])
+ assert_equal(len(r), 2)
+ assert_equal(r[0], [d, d + 1])
+ assert_equal(r[1], d + 2)
+
+ tgt = np.ones((2, 3), dtype=bool)
+ tgt[0, 2] = False
+ tgt[1, 0:2] = False
+ r = np.array([[True, True, False], [False, False, True]])
+ assert_equal(r, tgt)
+ r = np.array([[True, False], [True, False], [False, True]])
+ assert_equal(r, tgt.T)
+
+ def test_array_empty(self):
+ assert_raises(TypeError, np.array)
+
+ def test_array_copy_false(self):
+ d = np.array([1, 2, 3])
+ e = np.array(d, copy=False)
+ d[1] = 3
+ assert_array_equal(e, [1, 3, 3])
+ e = np.array(d, copy=False, order='F')
+ d[1] = 4
+ assert_array_equal(e, [1, 4, 3])
+ e[2] = 7
+ assert_array_equal(d, [1, 4, 7])
+
+ def test_array_copy_true(self):
+ d = np.array([[1,2,3], [1, 2, 3]])
+ e = np.array(d, copy=True)
+ d[0, 1] = 3
+ e[0, 2] = -7
+ assert_array_equal(e, [[1, 2, -7], [1, 2, 3]])
+ assert_array_equal(d, [[1, 3, 3], [1, 2, 3]])
+ e = np.array(d, copy=True, order='F')
+ d[0, 1] = 5
+ e[0, 2] = 7
+ assert_array_equal(e, [[1, 3, 7], [1, 2, 3]])
+ assert_array_equal(d, [[1, 5, 3], [1,2,3]])
+
+ def test_array_cont(self):
+ d = np.ones(10)[::2]
+ assert_(np.ascontiguousarray(d).flags.c_contiguous)
+ assert_(np.ascontiguousarray(d).flags.f_contiguous)
+ assert_(np.asfortranarray(d).flags.c_contiguous)
+ assert_(np.asfortranarray(d).flags.f_contiguous)
+ d = np.ones((10, 10))[::2,::2]
+ assert_(np.ascontiguousarray(d).flags.c_contiguous)
+ assert_(np.asfortranarray(d).flags.f_contiguous)
+
+
+class TestAssignment(object):
+ def test_assignment_broadcasting(self):
+ a = np.arange(6).reshape(2, 3)
+
+ # Broadcasting the input to the output
+ a[...] = np.arange(3)
+ assert_equal(a, [[0, 1, 2], [0, 1, 2]])
+ a[...] = np.arange(2).reshape(2, 1)
+ assert_equal(a, [[0, 0, 0], [1, 1, 1]])
+
+ # For compatibility with <= 1.5, a limited version of broadcasting
+ # the output to the input.
+ #
+ # This behavior is inconsistent with NumPy broadcasting
+ # in general, because it only uses one of the two broadcasting
+ # rules (adding a new "1" dimension to the left of the shape),
+ # applied to the output instead of an input. In NumPy 2.0, this kind
+ # of broadcasting assignment will likely be disallowed.
+ a[...] = np.arange(6)[::-1].reshape(1, 2, 3)
+ assert_equal(a, [[5, 4, 3], [2, 1, 0]])
+ # The other type of broadcasting would require a reduction operation.
+
+ def assign(a, b):
+ a[...] = b
+
+ assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3))
+
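+    # The legacy rule in miniature (illustrative), with a of shape (2, 3):
+    #     a[...] = np.zeros((1, 2, 3))   # accepted: leading 1 is stripped
+    #     a[...] = np.zeros((2, 2, 3))   # ValueError: would need a reduction
+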
+ def test_assignment_errors(self):
+ # Address issue #2276
+ class C:
+ pass
+ a = np.zeros(1)
+
+ def assign(v):
+ a[0] = v
+
+ assert_raises((AttributeError, TypeError), assign, C())
+ assert_raises(ValueError, assign, [1])
+
+ def test_unicode_assignment(self):
+ # gh-5049
+ from numpy.core.numeric import set_string_function
+
+ @contextmanager
+ def inject_str(s):
+ """ replace ndarray.__str__ temporarily """
+ set_string_function(lambda x: s, repr=False)
+ try:
+ yield
+ finally:
+ set_string_function(None, repr=False)
+
+ a1d = np.array([u'test'])
+ a0d = np.array(u'done')
+ with inject_str(u'bad'):
+ a1d[0] = a0d # previously this would invoke __str__
+ assert_equal(a1d[0], u'done')
+
+ # this would crash for the same reason
+ np.array([np.array(u'\xe5\xe4\xf6')])
+
+ def test_stringlike_empty_list(self):
+ # gh-8902
+ u = np.array([u'done'])
+ b = np.array([b'done'])
+
+ class bad_sequence(object):
+ def __getitem__(self): pass
+ def __len__(self): raise RuntimeError
+
+ assert_raises(ValueError, operator.setitem, u, 0, [])
+ assert_raises(ValueError, operator.setitem, b, 0, [])
+
+ assert_raises(ValueError, operator.setitem, u, 0, bad_sequence())
+ assert_raises(ValueError, operator.setitem, b, 0, bad_sequence())
+
+ def test_longdouble_assignment(self):
+ # only relevant if longdouble is larger than float
+ # we're looking for loss of precision
+
+ for dtype in (np.longdouble, np.longcomplex):
+ # gh-8902
+ tinyb = np.nextafter(np.longdouble(0), 1).astype(dtype)
+ tinya = np.nextafter(np.longdouble(0), -1).astype(dtype)
+
+ # construction
+ tiny1d = np.array([tinya])
+ assert_equal(tiny1d[0], tinya)
+
+ # scalar = scalar
+ tiny1d[0] = tinyb
+ assert_equal(tiny1d[0], tinyb)
+
+ # 0d = scalar
+ tiny1d[0, ...] = tinya
+ assert_equal(tiny1d[0], tinya)
+
+ # 0d = 0d
+ tiny1d[0, ...] = tinyb[...]
+ assert_equal(tiny1d[0], tinyb)
+
+ # scalar = 0d
+ tiny1d[0] = tinyb[...]
+ assert_equal(tiny1d[0], tinyb)
+
+ arr = np.array([np.array(tinya)])
+ assert_equal(arr[0], tinya)
+
+ def test_cast_to_string(self):
+ # cast to str should do "str(scalar)", not "str(scalar.item())"
+ # Example: In python2, str(float) is truncated, so we want to avoid
+ # str(np.float64(...).item()) as this would incorrectly truncate.
+ a = np.zeros(1, dtype='S20')
+ a[:] = np.array(['1.12345678901234567890'], dtype='f8')
+ assert_equal(a[0], b"1.1234567890123457")
+
+
+class TestDtypedescr(object):
+ def test_construction(self):
+ d1 = np.dtype('i4')
+ assert_equal(d1, np.dtype(np.int32))
+ d2 = np.dtype('f8')
+ assert_equal(d2, np.dtype(np.float64))
+
+ def test_byteorders(self):
+ assert_(np.dtype('i4'))
+ assert_(np.dtype([('a', 'i4')]))
+
+    def test_structured_non_void(self):
+        fields = [('a', '<i2'), ('b', '<i2')]
+        dt_int = np.dtype(('i4', fields))
+        assert_equal(str(dt_int), "(numpy.int32, [('a', '<i2'), ('b', '<i2')])")
+
+    @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2")
+ def test_sequence_long(self):
+ assert_equal(np.array([long(4), long(4)]).dtype, np.long)
+ assert_equal(np.array([long(4), 2**80]).dtype, object)
+ assert_equal(np.array([long(4), 2**80, long(4)]).dtype, object)
+ assert_equal(np.array([2**80, long(4)]).dtype, object)
+
+ def test_non_sequence_sequence(self):
+ """Should not segfault.
+
+ Class Fail breaks the sequence protocol for new style classes, i.e.,
+ those derived from object. Class Map is a mapping type indicated by
+ raising a ValueError. At some point we may raise a warning instead
+ of an error in the Fail case.
+
+ """
+ class Fail(object):
+ def __len__(self):
+ return 1
+
+ def __getitem__(self, index):
+ raise ValueError()
+
+ class Map(object):
+ def __len__(self):
+ return 1
+
+ def __getitem__(self, index):
+ raise KeyError()
+
+ a = np.array([Map()])
+ assert_(a.shape == (1,))
+ assert_(a.dtype == np.dtype(object))
+ assert_raises(ValueError, np.array, [Fail()])
+
+ def test_no_len_object_type(self):
+ # gh-5100, want object array from iterable object without len()
+ class Point2:
+ def __init__(self):
+ pass
+
+ def __getitem__(self, ind):
+ if ind in [0, 1]:
+ return ind
+ else:
+ raise IndexError()
+ d = np.array([Point2(), Point2(), Point2()])
+ assert_equal(d.dtype, np.dtype(object))
+
+ def test_false_len_sequence(self):
+ # gh-7264, segfault for this example
+ class C:
+ def __getitem__(self, i):
+ raise IndexError
+ def __len__(self):
+ return 42
+
+ assert_raises(ValueError, np.array, C()) # segfault?
+
+ def test_failed_len_sequence(self):
+ # gh-7393
+ class A(object):
+ def __init__(self, data):
+ self._data = data
+ def __getitem__(self, item):
+ return type(self)(self._data[item])
+ def __len__(self):
+ return len(self._data)
+
+ # len(d) should give 3, but len(d[0]) will fail
+ d = A([1,2,3])
+ assert_equal(len(np.array(d)), 3)
+
+ def test_array_too_big(self):
+ # Test that array creation succeeds for arrays addressable by intp
+ # on the byte level and fails for too large arrays.
+ buf = np.zeros(100)
+
+ max_bytes = np.iinfo(np.intp).max
+ for dtype in ["intp", "S20", "b"]:
+ dtype = np.dtype(dtype)
+ itemsize = dtype.itemsize
+
+ np.ndarray(buffer=buf, strides=(0,),
+ shape=(max_bytes//itemsize,), dtype=dtype)
+ assert_raises(ValueError, np.ndarray, buffer=buf, strides=(0,),
+ shape=(max_bytes//itemsize + 1,), dtype=dtype)
+
+ def test_jagged_ndim_object(self):
+ # Lists of mismatching depths are treated as object arrays
+ a = np.array([[1], 2, 3])
+ assert_equal(a.shape, (3,))
+ assert_equal(a.dtype, object)
+
+ a = np.array([1, [2], 3])
+ assert_equal(a.shape, (3,))
+ assert_equal(a.dtype, object)
+
+ a = np.array([1, 2, [3]])
+ assert_equal(a.shape, (3,))
+ assert_equal(a.dtype, object)
+
+ def test_jagged_shape_object(self):
+ # The jagged dimension of a list is turned into an object array
+ a = np.array([[1, 1], [2], [3]])
+ assert_equal(a.shape, (3,))
+ assert_equal(a.dtype, object)
+
+ a = np.array([[1], [2, 2], [3]])
+ assert_equal(a.shape, (3,))
+ assert_equal(a.dtype, object)
+
+ a = np.array([[1], [2], [3, 3]])
+ assert_equal(a.shape, (3,))
+ assert_equal(a.dtype, object)
+
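+    # Both failure modes above in one line (illustrative): any depth mismatch
+    # or jagged length makes the result a 1-d object array, e.g.
+    #     np.array([[1, 1], [2], [3]])   # -> shape (3,), dtype object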
+
+class TestStructured(object):
+ def test_subarray_field_access(self):
+ a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
+ a['a'] = np.arange(60).reshape(3, 5, 2, 2)
+
+ # Since the subarray is always in C-order, a transpose
+ # does not swap the subarray:
+ assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3))
+
+ # In Fortran order, the subarray gets appended
+ # like in all other cases, not prepended as a special case
+ b = a.copy(order='F')
+ assert_equal(a['a'].shape, b['a'].shape)
+ assert_equal(a.T['a'].shape, a.T.copy()['a'].shape)
+
+ def test_subarray_comparison(self):
+ # Check that comparisons between record arrays with
+ # multi-dimensional field types work properly
+ a = np.rec.fromrecords(
+ [([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])],
+ dtype=[('a', ('f4', 3)), ('b', object), ('c', ('i4', (2, 2)))])
+ b = a.copy()
+ assert_equal(a == b, [True, True])
+ assert_equal(a != b, [False, False])
+ b[1].b = 'c'
+ assert_equal(a == b, [True, False])
+ assert_equal(a != b, [False, True])
+ for i in range(3):
+ b[0].a = a[0].a
+ b[0].a[i] = 5
+ assert_equal(a == b, [False, False])
+ assert_equal(a != b, [True, True])
+ for i in range(2):
+ for j in range(2):
+ b = a.copy()
+ b[0].c[i, j] = 10
+ assert_equal(a == b, [False, True])
+ assert_equal(a != b, [True, False])
+
+ # Check that broadcasting with a subarray works
+ a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')])
+ b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')])
+ assert_equal(a == b, [[True, True, False], [False, False, True]])
+ assert_equal(b == a, [[True, True, False], [False, False, True]])
+ a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))])
+ b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))])
+ assert_equal(a == b, [[True, True, False], [False, False, True]])
+ assert_equal(b == a, [[True, True, False], [False, False, True]])
+ a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))])
+ b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
+ assert_equal(a == b, [[True, False, False], [False, False, True]])
+ assert_equal(b == a, [[True, False, False], [False, False, True]])
+
+ # Check that broadcasting Fortran-style arrays with a subarray work
+ a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F')
+ b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
+ assert_equal(a == b, [[True, False, False], [False, False, True]])
+ assert_equal(b == a, [[True, False, False], [False, False, True]])
+
+        # Check that incompatible sub-array shapes don't result in broadcasting
+ x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')])
+ y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
+ # This comparison invokes deprecated behaviour, and will probably
+ # start raising an error eventually. What we really care about in this
+ # test is just that it doesn't return True.
+ with suppress_warnings() as sup:
+ sup.filter(FutureWarning, "elementwise == comparison failed")
+ assert_equal(x == y, False)
+
+ x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')])
+ y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
+ # This comparison invokes deprecated behaviour, and will probably
+ # start raising an error eventually. What we really care about in this
+ # test is just that it doesn't return True.
+ with suppress_warnings() as sup:
+ sup.filter(FutureWarning, "elementwise == comparison failed")
+ assert_equal(x == y, False)
+
+ # Check that structured arrays that are different only in
+ # byte-order work
+        a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')])
+        b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')])
+ assert_equal(a == b, [False, True])
+
+ def test_casting(self):
+ # Check that casting a structured array to change its byte order
+ # works
+        a = np.array([(1,)], dtype=[('a', '<i4')])
+        assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe'))
+ b = a.astype([('a', '>i4')])
+ assert_equal(b, a.byteswap().newbyteorder())
+ assert_equal(a['a'][0], b['a'][0])
+
+ # Check that equality comparison works on structured arrays if
+ # they are 'equiv'-castable
+        a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
+        b = np.array([(42, 5), (1, 10)], dtype=[('b', '>f8'), ('a', '<i4')])
+ assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
+ assert_equal(a == b, [True, True])
+
+ # Check that 'equiv' casting can change byte order
+ assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
+ c = a.astype(b.dtype, casting='equiv')
+ assert_equal(a == c, [True, True])
+
+ # Check that 'safe' casting can change byte order and up-cast
+ # fields
+        t = [('a', '<i8'), ('b', '<f8')]
+ assert_(np.can_cast(a.dtype, t, casting='safe'))
+ c = a.astype(t, casting='safe')
+ assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
+ [True, True])
+
+ # Check that 'same_kind' casting can change byte order and
+ # change field widths within a "kind"
+        t = [('a', '<i4'), ('b', '<f4')]
+ assert_(np.can_cast(a.dtype, t, casting='same_kind'))
+ c = a.astype(t, casting='same_kind')
+ assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
+ [True, True])
+
+ # Check that casting fails if the casting rule should fail on
+ # any of the fields
+        t = [('a', '>i8'), ('b', '<f4')]
+        assert_(not np.can_cast(a.dtype, t, casting='safe'))
+        t = [('a', '>i4'), ('b', '<f8')]
+        assert_(not np.can_cast(a.dtype, t, casting='no'))
+
+    def _test_cast_from_flexible(self, dtype):
+        # empty string -> false
+ for n in range(3):
+ v = np.array(b'', (dtype, n))
+ assert_equal(bool(v), False)
+ assert_equal(bool(v[()]), False)
+ assert_equal(v.astype(bool), False)
+ assert_(isinstance(v.astype(bool), np.ndarray))
+ assert_(v[()].astype(bool) is np.False_)
+
+ # anything else -> true
+ for n in range(1, 4):
+ for val in [b'a', b'0', b' ']:
+ v = np.array(val, (dtype, n))
+ assert_equal(bool(v), True)
+ assert_equal(bool(v[()]), True)
+ assert_equal(v.astype(bool), True)
+ assert_(isinstance(v.astype(bool), np.ndarray))
+ assert_(v[()].astype(bool) is np.True_)
+
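+    # Rule of thumb exercised above (illustrative): for flexible scalars only
+    # the zero-length value is falsy; any payload, even b'0' or b' ', is
+    # truthy, e.g.
+    #     bool(np.array(b'', (dtype, 0)))    # -> False
+    #     bool(np.array(b'0', (dtype, 1)))   # -> True
+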
+ def test_cast_from_void(self):
+ self._test_cast_from_flexible(np.void)
+
+ @pytest.mark.xfail(reason="See gh-9847")
+ def test_cast_from_unicode(self):
+ self._test_cast_from_flexible(np.unicode_)
+
+ @pytest.mark.xfail(reason="See gh-9847")
+ def test_cast_from_bytes(self):
+ self._test_cast_from_flexible(np.bytes_)
+
+
+class TestZeroSizeFlexible(object):
+ @staticmethod
+ def _zeros(shape, dtype=str):
+ dtype = np.dtype(dtype)
+ if dtype == np.void:
+ return np.zeros(shape, dtype=(dtype, 0))
+
+ # not constructable directly
+ dtype = np.dtype([('x', dtype, 0)])
+ return np.zeros(shape, dtype=dtype)['x']
+
+ def test_create(self):
+ zs = self._zeros(10, bytes)
+ assert_equal(zs.itemsize, 0)
+ zs = self._zeros(10, np.void)
+ assert_equal(zs.itemsize, 0)
+ zs = self._zeros(10, unicode)
+ assert_equal(zs.itemsize, 0)
+
+ def _test_sort_partition(self, name, kinds, **kwargs):
+ # Previously, these would all hang
+ for dt in [bytes, np.void, unicode]:
+ zs = self._zeros(10, dt)
+ sort_method = getattr(zs, name)
+ sort_func = getattr(np, name)
+ for kind in kinds:
+ sort_method(kind=kind, **kwargs)
+ sort_func(zs, kind=kind, **kwargs)
+
+ def test_sort(self):
+ self._test_sort_partition('sort', kinds='qhm')
+
+ def test_argsort(self):
+ self._test_sort_partition('argsort', kinds='qhm')
+
+ def test_partition(self):
+ self._test_sort_partition('partition', kinds=['introselect'], kth=2)
+
+ def test_argpartition(self):
+ self._test_sort_partition('argpartition', kinds=['introselect'], kth=2)
+
+ def test_resize(self):
+ # previously an error
+ for dt in [bytes, np.void, unicode]:
+ zs = self._zeros(10, dt)
+ zs.resize(25)
+ zs.resize((10, 10))
+
+ def test_view(self):
+ for dt in [bytes, np.void, unicode]:
+ zs = self._zeros(10, dt)
+
+ # viewing as itself should be allowed
+ assert_equal(zs.view(dt).dtype, np.dtype(dt))
+
+ # viewing as any non-empty type gives an empty result
+ assert_equal(zs.view((dt, 1)).shape, (0,))
+
+ def test_pickle(self):
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ for dt in [bytes, np.void, unicode]:
+ zs = self._zeros(10, dt)
+ p = pickle.dumps(zs, protocol=proto)
+ zs2 = pickle.loads(p)
+
+ assert_equal(zs.dtype, zs2.dtype)
+
+ @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5,
+ reason="requires pickle protocol 5")
+ def test_pickle_with_buffercallback(self):
+ array = np.arange(10)
+ buffers = []
+ bytes_string = pickle.dumps(array, buffer_callback=buffers.append,
+ protocol=5)
+ array_from_buffer = pickle.loads(bytes_string, buffers=buffers)
+ # when using pickle protocol 5 with buffer callbacks,
+ # array_from_buffer is reconstructed from a buffer holding a view
+ # to the initial array's data, so modifying an element in array
+ # should modify it in array_from_buffer too.
+ array[0] = -1
+ assert array_from_buffer[0] == -1, array_from_buffer[0]
+
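+    # Protocol-5 out-of-band pickling in brief (illustrative sketch):
+    #     bufs = []
+    #     s = pickle.dumps(arr, protocol=5, buffer_callback=bufs.append)
+    #     arr2 = pickle.loads(s, buffers=bufs)   # arr2 aliases arr's memory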
+
+class TestMethods(object):
+ def test_compress(self):
+ tgt = [[5, 6, 7, 8, 9]]
+ arr = np.arange(10).reshape(2, 5)
+ out = arr.compress([0, 1], axis=0)
+ assert_equal(out, tgt)
+
+ tgt = [[1, 3], [6, 8]]
+ out = arr.compress([0, 1, 0, 1, 0], axis=1)
+ assert_equal(out, tgt)
+
+ tgt = [[1], [6]]
+ arr = np.arange(10).reshape(2, 5)
+ out = arr.compress([0, 1], axis=1)
+ assert_equal(out, tgt)
+
+ arr = np.arange(10).reshape(2, 5)
+ out = arr.compress([0, 1])
+ assert_equal(out, 1)
+
+ def test_choose(self):
+ x = 2*np.ones((3,), dtype=int)
+ y = 3*np.ones((3,), dtype=int)
+ x2 = 2*np.ones((2, 3), dtype=int)
+ y2 = 3*np.ones((2, 3), dtype=int)
+ ind = np.array([0, 0, 1])
+
+ A = ind.choose((x, y))
+ assert_equal(A, [2, 2, 3])
+
+ A = ind.choose((x2, y2))
+ assert_equal(A, [[2, 2, 3], [2, 2, 3]])
+
+ A = ind.choose((x, y2))
+ assert_equal(A, [[2, 2, 3], [2, 2, 3]])
+
+ oned = np.ones(1)
+ # gh-12031, caused SEGFAULT
+ assert_raises(TypeError, oned.choose,np.void(0), [oned])
+
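+    # choose in one line (illustrative): out[i] = choices[ind[i]][i], so for
+    # ind = [0, 0, 1] with x = [2, 2, 2] and y = [3, 3, 3]:
+    #     ind.choose((x, y))   # -> [2, 2, 3]
+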
+ def test_prod(self):
+ ba = [1, 2, 10, 11, 6, 5, 4]
+ ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
+
+ for ctype in [np.int16, np.uint16, np.int32, np.uint32,
+ np.float32, np.float64, np.complex64, np.complex128]:
+ a = np.array(ba, ctype)
+ a2 = np.array(ba2, ctype)
+ if ctype in ['1', 'b']:
+ assert_raises(ArithmeticError, a.prod)
+ assert_raises(ArithmeticError, a2.prod, axis=1)
+ else:
+ assert_equal(a.prod(axis=0), 26400)
+ assert_array_equal(a2.prod(axis=0),
+ np.array([50, 36, 84, 180], ctype))
+ assert_array_equal(a2.prod(axis=-1),
+ np.array([24, 1890, 600], ctype))
+
+ def test_repeat(self):
+ m = np.array([1, 2, 3, 4, 5, 6])
+ m_rect = m.reshape((2, 3))
+
+ A = m.repeat([1, 3, 2, 1, 1, 2])
+ assert_equal(A, [1, 2, 2, 2, 3,
+ 3, 4, 5, 6, 6])
+
+ A = m.repeat(2)
+ assert_equal(A, [1, 1, 2, 2, 3, 3,
+ 4, 4, 5, 5, 6, 6])
+
+ A = m_rect.repeat([2, 1], axis=0)
+ assert_equal(A, [[1, 2, 3],
+ [1, 2, 3],
+ [4, 5, 6]])
+
+ A = m_rect.repeat([1, 3, 2], axis=1)
+ assert_equal(A, [[1, 2, 2, 2, 3, 3],
+ [4, 5, 5, 5, 6, 6]])
+
+ A = m_rect.repeat(2, axis=0)
+ assert_equal(A, [[1, 2, 3],
+ [1, 2, 3],
+ [4, 5, 6],
+ [4, 5, 6]])
+
+ A = m_rect.repeat(2, axis=1)
+ assert_equal(A, [[1, 1, 2, 2, 3, 3],
+ [4, 4, 5, 5, 6, 6]])
+
+ def test_reshape(self):
+ arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
+
+ tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]
+ assert_equal(arr.reshape(2, 6), tgt)
+
+ tgt = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]
+ assert_equal(arr.reshape(3, 4), tgt)
+
+ tgt = [[1, 10, 8, 6], [4, 2, 11, 9], [7, 5, 3, 12]]
+ assert_equal(arr.reshape((3, 4), order='F'), tgt)
+
+ tgt = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]]
+ assert_equal(arr.T.reshape((3, 4), order='C'), tgt)
+
+ def test_round(self):
+ def check_round(arr, expected, *round_args):
+ assert_equal(arr.round(*round_args), expected)
+ # With output array
+ out = np.zeros_like(arr)
+ res = arr.round(*round_args, out=out)
+ assert_equal(out, expected)
+ assert_equal(out, res)
+
+ check_round(np.array([1.2, 1.5]), [1, 2])
+ check_round(np.array(1.5), 2)
+ check_round(np.array([12.2, 15.5]), [10, 20], -1)
+ check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1)
+ # Complex rounding
+ check_round(np.array([4.5 + 1.5j]), [4 + 2j])
+ check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1)
+
+ def test_squeeze(self):
+ a = np.array([[[1], [2], [3]]])
+ assert_equal(a.squeeze(), [1, 2, 3])
+ assert_equal(a.squeeze(axis=(0,)), [[1], [2], [3]])
+ assert_raises(ValueError, a.squeeze, axis=(1,))
+ assert_equal(a.squeeze(axis=(2,)), [[1, 2, 3]])
+
+ def test_transpose(self):
+ a = np.array([[1, 2], [3, 4]])
+ assert_equal(a.transpose(), [[1, 3], [2, 4]])
+ assert_raises(ValueError, lambda: a.transpose(0))
+ assert_raises(ValueError, lambda: a.transpose(0, 0))
+ assert_raises(ValueError, lambda: a.transpose(0, 1, 2))
+
+ def test_sort(self):
+ # test ordering for floats and complex containing nans. It is only
+ # necessary to check the less-than comparison, so sorts that
+ # only follow the insertion sort path are sufficient. We only
+ # test doubles and complex doubles as the logic is the same.
+
+ # check doubles
+ msg = "Test real sort order with nans"
+ a = np.array([np.nan, 1, 0])
+ b = np.sort(a)
+ assert_equal(b, a[::-1], msg)
+ # check complex
+ msg = "Test complex sort order with nans"
+ a = np.zeros(9, dtype=np.complex128)
+ a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0]
+ a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0]
+ b = np.sort(a)
+ assert_equal(b, a[::-1], msg)
+
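+        # Expected order (illustrative): nans sort to the end, and complex
+        # values order lexicographically on (real, imag); hence the reversed
+        # inputs above are already sorted, e.g.
+        #     np.sort([np.nan, 1, 0])   # -> [0., 1., nan]
+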
+ # all c scalar sorts use the same code with different types
+ # so it suffices to run a quick check with one type. The number
+ # of sorted items must be greater than ~50 to check the actual
+ # algorithm because quick and merge sort fall over to insertion
+ # sort for small arrays.
+ a = np.arange(101)
+ b = a[::-1].copy()
+ for kind in ['q', 'm', 'h']:
+ msg = "scalar sort, kind=%s" % kind
+ c = a.copy()
+ c.sort(kind=kind)
+ assert_equal(c, a, msg)
+ c = b.copy()
+ c.sort(kind=kind)
+ assert_equal(c, a, msg)
+
+ # test complex sorts. These use the same code as the scalars
+ # but the compare function differs.
+ ai = a*1j + 1
+ bi = b*1j + 1
+ for kind in ['q', 'm', 'h']:
+ msg = "complex sort, real part == 1, kind=%s" % kind
+ c = ai.copy()
+ c.sort(kind=kind)
+ assert_equal(c, ai, msg)
+ c = bi.copy()
+ c.sort(kind=kind)
+ assert_equal(c, ai, msg)
+ ai = a + 1j
+ bi = b + 1j
+ for kind in ['q', 'm', 'h']:
+ msg = "complex sort, imag part == 1, kind=%s" % kind
+ c = ai.copy()
+ c.sort(kind=kind)
+ assert_equal(c, ai, msg)
+ c = bi.copy()
+ c.sort(kind=kind)
+ assert_equal(c, ai, msg)
+
+ # test sorting of complex arrays requiring byte-swapping, gh-5441
+ for endianness in '<>':
+ for dt in np.typecodes['Complex']:
+ arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt)
+ c = arr.copy()
+ c.sort()
+ msg = 'byte-swapped complex sort, dtype={0}'.format(dt)
+ assert_equal(c, arr, msg)
+
+ # test string sorts.
+ s = 'aaaaaaaa'
+ a = np.array([s + chr(i) for i in range(101)])
+ b = a[::-1].copy()
+ for kind in ['q', 'm', 'h']:
+ msg = "string sort, kind=%s" % kind
+ c = a.copy()
+ c.sort(kind=kind)
+ assert_equal(c, a, msg)
+ c = b.copy()
+ c.sort(kind=kind)
+ assert_equal(c, a, msg)
+
+ # test unicode sorts.
+ s = 'aaaaaaaa'
+ a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
+ b = a[::-1].copy()
+ for kind in ['q', 'm', 'h']:
+ msg = "unicode sort, kind=%s" % kind
+ c = a.copy()
+ c.sort(kind=kind)
+ assert_equal(c, a, msg)
+ c = b.copy()
+ c.sort(kind=kind)
+ assert_equal(c, a, msg)
+
+ # test object array sorts.
+ a = np.empty((101,), dtype=object)
+ a[:] = list(range(101))
+ b = a[::-1]
+ for kind in ['q', 'h', 'm']:
+ msg = "object sort, kind=%s" % kind
+ c = a.copy()
+ c.sort(kind=kind)
+ assert_equal(c, a, msg)
+ c = b.copy()
+ c.sort(kind=kind)
+ assert_equal(c, a, msg)
+
+ # test record array sorts.
+ dt = np.dtype([('f', float), ('i', int)])
+ a = np.array([(i, i) for i in range(101)], dtype=dt)
+ b = a[::-1]
+ for kind in ['q', 'h', 'm']:
+ msg = "object sort, kind=%s" % kind
+ c = a.copy()
+ c.sort(kind=kind)
+ assert_equal(c, a, msg)
+ c = b.copy()
+ c.sort(kind=kind)
+ assert_equal(c, a, msg)
+
+ # test datetime64 sorts.
+ a = np.arange(0, 101, dtype='datetime64[D]')
+ b = a[::-1]
+ for kind in ['q', 'h', 'm']:
+ msg = "datetime64 sort, kind=%s" % kind
+ c = a.copy()
+ c.sort(kind=kind)
+ assert_equal(c, a, msg)
+ c = b.copy()
+ c.sort(kind=kind)
+ assert_equal(c, a, msg)
+
+ # test timedelta64 sorts.
+ a = np.arange(0, 101, dtype='timedelta64[D]')
+ b = a[::-1]
+ for kind in ['q', 'h', 'm']:
+ msg = "timedelta64 sort, kind=%s" % kind
+ c = a.copy()
+ c.sort(kind=kind)
+ assert_equal(c, a, msg)
+ c = b.copy()
+ c.sort(kind=kind)
+ assert_equal(c, a, msg)
+
+ # check axis handling. This should be the same for all type
+ # specific sorts, so we only check it for one type and one kind
+ a = np.array([[3, 2], [1, 0]])
+ b = np.array([[1, 0], [3, 2]])
+ c = np.array([[2, 3], [0, 1]])
+ d = a.copy()
+ d.sort(axis=0)
+ assert_equal(d, b, "test sort with axis=0")
+ d = a.copy()
+ d.sort(axis=1)
+ assert_equal(d, c, "test sort with axis=1")
+ d = a.copy()
+ d.sort()
+ assert_equal(d, c, "test sort with default axis")
+
+ # check axis handling for multidimensional empty arrays
+ a = np.array([])
+ a.shape = (3, 2, 1, 0)
+ for axis in range(-a.ndim, a.ndim):
+ msg = 'test empty array sort with axis={0}'.format(axis)
+ assert_equal(np.sort(a, axis=axis), a, msg)
+ msg = 'test empty array sort with axis=None'
+ assert_equal(np.sort(a, axis=None), a.ravel(), msg)
+
+ # test generic class with bogus ordering,
+ # should not segfault.
+ class Boom(object):
+ def __lt__(self, other):
+ return True
+
+ a = np.array([Boom()]*100, dtype=object)
+ for kind in ['q', 'm', 'h']:
+ msg = "bogus comparison object sort, kind=%s" % kind
+ c.sort(kind=kind)
+
+ def test_void_sort(self):
+ # gh-8210 - previously segfaulted
+ for i in range(4):
+ rand = np.random.randint(256, size=4000, dtype=np.uint8)
+ arr = rand.view('V4')
+ arr[::-1].sort()
+
+ dt = np.dtype([('val', 'i4', (1,))])
+ for i in range(4):
+ rand = np.random.randint(256, size=4000, dtype=np.uint8)
+ arr = rand.view(dt)
+ arr[::-1].sort()
+
+ def test_sort_raises(self):
+ #gh-9404
+ arr = np.array([0, datetime.now(), 1], dtype=object)
+ for kind in ['q', 'm', 'h']:
+ assert_raises(TypeError, arr.sort, kind=kind)
+ #gh-3879
+ class Raiser(object):
+ def raises_anything(*args, **kwargs):
+ raise TypeError("SOMETHING ERRORED")
+ __eq__ = __ne__ = __lt__ = __gt__ = __ge__ = __le__ = raises_anything
+ arr = np.array([[Raiser(), n] for n in range(10)]).reshape(-1)
+ np.random.shuffle(arr)
+ for kind in ['q', 'm', 'h']:
+ assert_raises(TypeError, arr.sort, kind=kind)
+
+ def test_sort_degraded(self):
+ # test degraded dataset would take minutes to run with normal qsort
+ d = np.arange(1000000)
+ do = d.copy()
+ x = d
+ # create a median of 3 killer where each median is the sorted second
+ # last element of the quicksort partition
+ while x.size > 3:
+ mid = x.size // 2
+ x[mid], x[-2] = x[-2], x[mid]
+ x = x[:-2]
+
+ assert_equal(np.sort(d), do)
+ assert_equal(d[np.argsort(d)], do)
+
+ def test_copy(self):
+ def assert_fortran(arr):
+ assert_(arr.flags.fortran)
+ assert_(arr.flags.f_contiguous)
+ assert_(not arr.flags.c_contiguous)
+
+ def assert_c(arr):
+ assert_(not arr.flags.fortran)
+ assert_(not arr.flags.f_contiguous)
+ assert_(arr.flags.c_contiguous)
+
+ a = np.empty((2, 2), order='F')
+ # Test copying a Fortran array
+ assert_c(a.copy())
+ assert_c(a.copy('C'))
+ assert_fortran(a.copy('F'))
+ assert_fortran(a.copy('A'))
+
+ # Now test starting with a C array.
+ a = np.empty((2, 2), order='C')
+ assert_c(a.copy())
+ assert_c(a.copy('C'))
+ assert_fortran(a.copy('F'))
+ assert_c(a.copy('A'))
+
+ def test_sort_order(self):
+ # Test sorting an array with fields
+ x1 = np.array([21, 32, 14])
+ x2 = np.array(['my', 'first', 'name'])
+ x3 = np.array([3.1, 4.5, 6.2])
+ r = np.rec.fromarrays([x1, x2, x3], names='id,word,number')
+
+ r.sort(order=['id'])
+ assert_equal(r.id, np.array([14, 21, 32]))
+ assert_equal(r.word, np.array(['name', 'my', 'first']))
+ assert_equal(r.number, np.array([6.2, 3.1, 4.5]))
+
+ r.sort(order=['word'])
+ assert_equal(r.id, np.array([32, 21, 14]))
+ assert_equal(r.word, np.array(['first', 'my', 'name']))
+ assert_equal(r.number, np.array([4.5, 3.1, 6.2]))
+
+ r.sort(order=['number'])
+ assert_equal(r.id, np.array([21, 32, 14]))
+ assert_equal(r.word, np.array(['my', 'first', 'name']))
+ assert_equal(r.number, np.array([3.1, 4.5, 6.2]))
+
+ assert_raises_regex(ValueError, 'duplicate',
+ lambda: r.sort(order=['id', 'id']))
+
+ if sys.byteorder == 'little':
+ strtype = '>i2'
+ else:
+            strtype = '<i2'
+
+    def test_argsort(self):
+        # test argsort of complex arrays requiring byte-swapping, gh-5441
+        for endianness in '<>':
+            for dt in np.typecodes['Complex']:
+ arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt)
+ msg = 'byte-swapped complex argsort, dtype={0}'.format(dt)
+ assert_equal(arr.argsort(),
+ np.arange(len(arr), dtype=np.intp), msg)
+
+ # test string argsorts.
+ s = 'aaaaaaaa'
+ a = np.array([s + chr(i) for i in range(101)])
+ b = a[::-1].copy()
+ r = np.arange(101)
+ rr = r[::-1]
+ for kind in ['q', 'm', 'h']:
+ msg = "string argsort, kind=%s" % kind
+ assert_equal(a.copy().argsort(kind=kind), r, msg)
+ assert_equal(b.copy().argsort(kind=kind), rr, msg)
+
+ # test unicode argsorts.
+ s = 'aaaaaaaa'
+ a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
+ b = a[::-1]
+ r = np.arange(101)
+ rr = r[::-1]
+ for kind in ['q', 'm', 'h']:
+ msg = "unicode argsort, kind=%s" % kind
+ assert_equal(a.copy().argsort(kind=kind), r, msg)
+ assert_equal(b.copy().argsort(kind=kind), rr, msg)
+
+ # test object array argsorts.
+ a = np.empty((101,), dtype=object)
+ a[:] = list(range(101))
+ b = a[::-1]
+ r = np.arange(101)
+ rr = r[::-1]
+ for kind in ['q', 'm', 'h']:
+ msg = "object argsort, kind=%s" % kind
+ assert_equal(a.copy().argsort(kind=kind), r, msg)
+ assert_equal(b.copy().argsort(kind=kind), rr, msg)
+
+ # test structured array argsorts.
+ dt = np.dtype([('f', float), ('i', int)])
+ a = np.array([(i, i) for i in range(101)], dtype=dt)
+ b = a[::-1]
+ r = np.arange(101)
+ rr = r[::-1]
+ for kind in ['q', 'm', 'h']:
+ msg = "structured array argsort, kind=%s" % kind
+ assert_equal(a.copy().argsort(kind=kind), r, msg)
+ assert_equal(b.copy().argsort(kind=kind), rr, msg)
+
+ # test datetime64 argsorts.
+ a = np.arange(0, 101, dtype='datetime64[D]')
+ b = a[::-1]
+ r = np.arange(101)
+ rr = r[::-1]
+ for kind in ['q', 'h', 'm']:
+ msg = "datetime64 argsort, kind=%s" % kind
+ assert_equal(a.copy().argsort(kind=kind), r, msg)
+ assert_equal(b.copy().argsort(kind=kind), rr, msg)
+
+ # test timedelta64 argsorts.
+ a = np.arange(0, 101, dtype='timedelta64[D]')
+ b = a[::-1]
+ r = np.arange(101)
+ rr = r[::-1]
+ for kind in ['q', 'h', 'm']:
+ msg = "timedelta64 argsort, kind=%s" % kind
+ assert_equal(a.copy().argsort(kind=kind), r, msg)
+ assert_equal(b.copy().argsort(kind=kind), rr, msg)
+
+ # check axis handling. This should be the same for all type
+ # specific argsorts, so we only check it for one type and one kind
+ a = np.array([[3, 2], [1, 0]])
+ b = np.array([[1, 1], [0, 0]])
+ c = np.array([[1, 0], [1, 0]])
+ assert_equal(a.copy().argsort(axis=0), b)
+ assert_equal(a.copy().argsort(axis=1), c)
+ assert_equal(a.copy().argsort(), c)
+
+ # check axis handling for multidimensional empty arrays
+ a = np.array([])
+ a.shape = (3, 2, 1, 0)
+ for axis in range(-a.ndim, a.ndim):
+ msg = 'test empty array argsort with axis={0}'.format(axis)
+ assert_equal(np.argsort(a, axis=axis),
+ np.zeros_like(a, dtype=np.intp), msg)
+ msg = 'test empty array argsort with axis=None'
+ assert_equal(np.argsort(a, axis=None),
+ np.zeros_like(a.ravel(), dtype=np.intp), msg)
+
+ # check that stable argsorts are stable
+ r = np.arange(100)
+ # scalars
+ a = np.zeros(100)
+ assert_equal(a.argsort(kind='m'), r)
+ # complex
+ a = np.zeros(100, dtype=complex)
+ assert_equal(a.argsort(kind='m'), r)
+ # string
+ a = np.array(['aaaaaaaaa' for i in range(100)])
+ assert_equal(a.argsort(kind='m'), r)
+ # unicode
+ a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode)
+ assert_equal(a.argsort(kind='m'), r)
+
+ def test_sort_unicode_kind(self):
+ d = np.arange(10)
+ k = b'\xc3\xa4'.decode("UTF8")
+ assert_raises(ValueError, d.sort, kind=k)
+ assert_raises(ValueError, d.argsort, kind=k)
+
+ def test_searchsorted(self):
+ # test for floats and complex containing nans. The logic is the
+ # same for all float types so only test double types for now.
+ # The search sorted routines use the compare functions for the
+ # array type, so this checks if that is consistent with the sort
+ # order.
+
+ # check double
+ a = np.array([0, 1, np.nan])
+ msg = "Test real searchsorted with nans, side='l'"
+ b = a.searchsorted(a, side='l')
+ assert_equal(b, np.arange(3), msg)
+ msg = "Test real searchsorted with nans, side='r'"
+ b = a.searchsorted(a, side='r')
+ assert_equal(b, np.arange(1, 4), msg)
+ # check double complex
+ a = np.zeros(9, dtype=np.complex128)
+ a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan]
+ a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan]
+ msg = "Test complex searchsorted with nans, side='l'"
+ b = a.searchsorted(a, side='l')
+ assert_equal(b, np.arange(9), msg)
+ msg = "Test complex searchsorted with nans, side='r'"
+ b = a.searchsorted(a, side='r')
+ assert_equal(b, np.arange(1, 10), msg)
+ msg = "Test searchsorted with little endian, side='l'"
+        a = np.array([0, 128], dtype='<i4')
+        b = a.searchsorted(np.array(128, dtype='<i4'))
+        assert_equal(b, 1, msg)
+
+            at((p[:, i + 1:].T > p[:, i]).all(),
+ msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T))
+ aae(p, d1[np.arange(d1.shape[0])[:, None],
+ np.argpartition(d1, i, axis=1, kind=k)])
+
+ p = np.partition(d0, i, axis=0, kind=k)
+ aae(p[i, :], np.array([i] * d1.shape[0], dtype=dt))
+ # array_less does not seem to work right
+ at((p[:i, :] <= p[i, :]).all(),
+ msg="%d: %r <= %r" % (i, p[i, :], p[:i, :]))
+ at((p[i + 1:, :] > p[i, :]).all(),
+ msg="%d: %r < %r" % (i, p[i, :], p[:, i + 1:]))
+ aae(p, d0[np.argpartition(d0, i, axis=0, kind=k),
+ np.arange(d0.shape[1])[None, :]])
+
+ # check inplace
+ dc = d.copy()
+ dc.partition(i, kind=k)
+ assert_equal(dc, np.partition(d, i, kind=k))
+ dc = d0.copy()
+ dc.partition(i, axis=0, kind=k)
+ assert_equal(dc, np.partition(d0, i, axis=0, kind=k))
+ dc = d1.copy()
+ dc.partition(i, axis=1, kind=k)
+ assert_equal(dc, np.partition(d1, i, axis=1, kind=k))
+
+ def assert_partitioned(self, d, kth):
+ prev = 0
+ for k in np.sort(kth):
+ assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k)
+ assert_((d[k:] >= d[k]).all(),
+ msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k]))
+ prev = k + 1
+
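+    # kth semantics restated (illustrative): after partitioning, d[k] holds
+    # the value it would have in a full sort, with everything before it <=
+    # and everything after it >=, unordered within each block, e.g.
+    #     np.partition([3, 4, 2, 1], 2)   # -> e.g. [2, 1, 3, 4]
+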
+ def test_partition_iterative(self):
+ d = np.arange(17)
+ kth = (0, 1, 2, 429, 231)
+ assert_raises(ValueError, d.partition, kth)
+ assert_raises(ValueError, d.argpartition, kth)
+ d = np.arange(10).reshape((2, 5))
+ assert_raises(ValueError, d.partition, kth, axis=0)
+ assert_raises(ValueError, d.partition, kth, axis=1)
+ assert_raises(ValueError, np.partition, d, kth, axis=1)
+ assert_raises(ValueError, np.partition, d, kth, axis=None)
+
+ d = np.array([3, 4, 2, 1])
+ p = np.partition(d, (0, 3))
+ self.assert_partitioned(p, (0, 3))
+ self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3))
+
+ assert_array_equal(p, np.partition(d, (-3, -1)))
+ assert_array_equal(p, d[np.argpartition(d, (-3, -1))])
+
+ d = np.arange(17)
+ np.random.shuffle(d)
+ d.partition(range(d.size))
+ assert_array_equal(np.arange(17), d)
+ np.random.shuffle(d)
+ assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))])
+
+ # test unsorted kth
+ d = np.arange(17)
+ np.random.shuffle(d)
+ keys = np.array([1, 3, 8, -2])
+ np.random.shuffle(d)
+ p = np.partition(d, keys)
+ self.assert_partitioned(p, keys)
+ p = d[np.argpartition(d, keys)]
+ self.assert_partitioned(p, keys)
+ np.random.shuffle(keys)
+ assert_array_equal(np.partition(d, keys), p)
+ assert_array_equal(d[np.argpartition(d, keys)], p)
+
+ # equal kth
+ d = np.arange(20)[::-1]
+ self.assert_partitioned(np.partition(d, [5]*4), [5])
+ self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]),
+ [5]*4 + [6, 13])
+ self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5])
+ self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])],
+ [5]*4 + [6, 13])
+
+ d = np.arange(12)
+ np.random.shuffle(d)
+ d1 = np.tile(np.arange(12), (4, 1))
+ map(np.random.shuffle, d1)
+ d0 = np.transpose(d1)
+
+ kth = (1, 6, 7, -1)
+ p = np.partition(d1, kth, axis=1)
+ pa = d1[np.arange(d1.shape[0])[:, None],
+ d1.argpartition(kth, axis=1)]
+ assert_array_equal(p, pa)
+ for i in range(d1.shape[0]):
+ self.assert_partitioned(p[i,:], kth)
+ p = np.partition(d0, kth, axis=0)
+ pa = d0[np.argpartition(d0, kth, axis=0),
+ np.arange(d0.shape[1])[None,:]]
+ assert_array_equal(p, pa)
+ for i in range(d0.shape[1]):
+ self.assert_partitioned(p[:, i], kth)
+
+ def test_partition_cdtype(self):
+ d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
+ ('Lancelot', 1.9, 38)],
+ dtype=[('name', '|S10'), ('height', ' (numpy ufunc, has_in_place_version, preferred_dtype)
+ ops = {
+ 'add': (np.add, True, float),
+ 'sub': (np.subtract, True, float),
+ 'mul': (np.multiply, True, float),
+ 'truediv': (np.true_divide, True, float),
+ 'floordiv': (np.floor_divide, True, float),
+ 'mod': (np.remainder, True, float),
+ 'divmod': (np.divmod, False, float),
+ 'pow': (np.power, True, int),
+ 'lshift': (np.left_shift, True, int),
+ 'rshift': (np.right_shift, True, int),
+ 'and': (np.bitwise_and, True, int),
+ 'xor': (np.bitwise_xor, True, int),
+ 'or': (np.bitwise_or, True, int),
+ # 'ge': (np.less_equal, False),
+ # 'gt': (np.less, False),
+ # 'le': (np.greater_equal, False),
+ # 'lt': (np.greater, False),
+ # 'eq': (np.equal, False),
+ # 'ne': (np.not_equal, False),
+ }
+ if sys.version_info >= (3, 5):
+ ops['matmul'] = (np.matmul, False, float)
+
+ class Coerced(Exception):
+ pass
+
+ def array_impl(self):
+ raise Coerced
+
+ def op_impl(self, other):
+ return "forward"
+
+ def rop_impl(self, other):
+ return "reverse"
+
+ def iop_impl(self, other):
+ return "in-place"
+
+ def array_ufunc_impl(self, ufunc, method, *args, **kwargs):
+ return ("__array_ufunc__", ufunc, method, args, kwargs)
+
+ # Create an object with the given base, in the given module, with a
+ # bunch of placeholder __op__ methods, and optionally a
+ # __array_ufunc__ and __array_priority__.
+ def make_obj(base, array_priority=False, array_ufunc=False,
+ alleged_module="__main__"):
+ class_namespace = {"__array__": array_impl}
+ if array_priority is not False:
+ class_namespace["__array_priority__"] = array_priority
+ for op in ops:
+ class_namespace["__{0}__".format(op)] = op_impl
+ class_namespace["__r{0}__".format(op)] = rop_impl
+ class_namespace["__i{0}__".format(op)] = iop_impl
+ if array_ufunc is not False:
+ class_namespace["__array_ufunc__"] = array_ufunc
+ eval_namespace = {"base": base,
+ "class_namespace": class_namespace,
+ "__name__": alleged_module,
+ }
+ MyType = eval("type('MyType', (base,), class_namespace)",
+ eval_namespace)
+ if issubclass(MyType, np.ndarray):
+ # Use this range to avoid special case weirdnesses around
+ # divide-by-0, pow(x, 2), overflow due to pow(big, big), etc.
+ return np.arange(3, 7).reshape(2, 2).view(MyType)
+ else:
+ return MyType()
+
+ def check(obj, binop_override_expected, ufunc_override_expected,
+ inplace_override_expected, check_scalar=True):
+ for op, (ufunc, has_inplace, dtype) in ops.items():
+ err_msg = ('op: %s, ufunc: %s, has_inplace: %s, dtype: %s'
+ % (op, ufunc, has_inplace, dtype))
+ check_objs = [np.arange(3, 7, dtype=dtype).reshape(2, 2)]
+ if check_scalar:
+ check_objs.append(check_objs[0][0])
+ for arr in check_objs:
+ arr_method = getattr(arr, "__{0}__".format(op))
+
+ def first_out_arg(result):
+ if op == "divmod":
+ assert_(isinstance(result, tuple))
+ return result[0]
+ else:
+ return result
+
+ # arr __op__ obj
+ if binop_override_expected:
+ assert_equal(arr_method(obj), NotImplemented, err_msg)
+ elif ufunc_override_expected:
+ assert_equal(arr_method(obj)[0], "__array_ufunc__",
+ err_msg)
+ else:
+ if (isinstance(obj, np.ndarray) and
+ (type(obj).__array_ufunc__ is
+ np.ndarray.__array_ufunc__)):
+ # __array__ gets ignored
+ res = first_out_arg(arr_method(obj))
+ assert_(res.__class__ is obj.__class__, err_msg)
+ else:
+ assert_raises((TypeError, Coerced),
+ arr_method, obj, err_msg=err_msg)
+ # obj __op__ arr
+ arr_rmethod = getattr(arr, "__r{0}__".format(op))
+ if ufunc_override_expected:
+ res = arr_rmethod(obj)
+ assert_equal(res[0], "__array_ufunc__",
+ err_msg=err_msg)
+ assert_equal(res[1], ufunc, err_msg=err_msg)
+ else:
+ if (isinstance(obj, np.ndarray) and
+ (type(obj).__array_ufunc__ is
+ np.ndarray.__array_ufunc__)):
+ # __array__ gets ignored
+ res = first_out_arg(arr_rmethod(obj))
+ assert_(res.__class__ is obj.__class__, err_msg)
+ else:
+ # __array_ufunc__ = "asdf" creates a TypeError
+ assert_raises((TypeError, Coerced),
+ arr_rmethod, obj, err_msg=err_msg)
+
+ # arr __iop__ obj
+ # array scalars don't have in-place operators
+ if has_inplace and isinstance(arr, np.ndarray):
+ arr_imethod = getattr(arr, "__i{0}__".format(op))
+ if inplace_override_expected:
+ assert_equal(arr_method(obj), NotImplemented,
+ err_msg=err_msg)
+ elif ufunc_override_expected:
+ res = arr_imethod(obj)
+ assert_equal(res[0], "__array_ufunc__", err_msg)
+ assert_equal(res[1], ufunc, err_msg)
+ assert_(type(res[-1]["out"]) is tuple, err_msg)
+ assert_(res[-1]["out"][0] is arr, err_msg)
+ else:
+ if (isinstance(obj, np.ndarray) and
+ (type(obj).__array_ufunc__ is
+ np.ndarray.__array_ufunc__)):
+ # __array__ gets ignored
+ assert_(arr_imethod(obj) is arr, err_msg)
+ else:
+ assert_raises((TypeError, Coerced),
+ arr_imethod, obj,
+ err_msg=err_msg)
+
+ op_fn = getattr(operator, op, None)
+ if op_fn is None:
+ op_fn = getattr(operator, op + "_", None)
+ if op_fn is None:
+ op_fn = getattr(builtins, op)
+ assert_equal(op_fn(obj, arr), "forward", err_msg)
+ if not isinstance(obj, np.ndarray):
+ if binop_override_expected:
+ assert_equal(op_fn(arr, obj), "reverse", err_msg)
+ elif ufunc_override_expected:
+ assert_equal(op_fn(arr, obj)[0], "__array_ufunc__",
+ err_msg)
+ if ufunc_override_expected:
+ assert_equal(ufunc(obj, arr)[0], "__array_ufunc__",
+ err_msg)
+
+ # No array priority, no array_ufunc -> nothing called
+ check(make_obj(object), False, False, False)
+ # Negative array priority, no array_ufunc -> nothing called
+ # (has to be very negative, because scalar priority is -1000000.0)
+ check(make_obj(object, array_priority=-2**30), False, False, False)
+ # Positive array priority, no array_ufunc -> binops and iops only
+ check(make_obj(object, array_priority=1), True, False, True)
+ # ndarray ignores array_priority for ndarray subclasses
+ check(make_obj(np.ndarray, array_priority=1), False, False, False,
+ check_scalar=False)
+ # Positive array_priority and array_ufunc -> array_ufunc only
+ check(make_obj(object, array_priority=1,
+ array_ufunc=array_ufunc_impl), False, True, False)
+ check(make_obj(np.ndarray, array_priority=1,
+ array_ufunc=array_ufunc_impl), False, True, False)
+ # array_ufunc set to None -> defer binops only
+ check(make_obj(object, array_ufunc=None), True, False, False)
+ check(make_obj(np.ndarray, array_ufunc=None), True, False, False,
+ check_scalar=False)
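+
+ # A minimal standalone restatement of the "array_ufunc set to None"
+ # rule exercised above, without the make_obj/check harness (sketch;
+ # _DeferToMe is a hypothetical class, not part of the harness):
+ class _DeferToMe(object):
+ # ndarray sees __array_ufunc__ = None, returns NotImplemented from
+ # its binop, and Python falls through to our reflected method.
+ __array_ufunc__ = None
+
+ def __radd__(self, other):
+ return "reverse"
+
+ assert_equal(np.ones(3) + _DeferToMe(), "reverse")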
+
+ def test_ufunc_override_normalize_signature(self):
+ # gh-5674
+ class SomeClass(object):
+ def __array_ufunc__(self, ufunc, method, *inputs, **kw):
+ return kw
+
+ a = SomeClass()
+ kw = np.add(a, [1])
+ assert_('sig' not in kw and 'signature' not in kw)
+ kw = np.add(a, [1], sig='ii->i')
+ assert_('sig' not in kw and 'signature' in kw)
+ assert_equal(kw['signature'], 'ii->i')
+ kw = np.add(a, [1], signature='ii->i')
+ assert_('sig' not in kw and 'signature' in kw)
+ assert_equal(kw['signature'], 'ii->i')
+
+ def test_array_ufunc_index(self):
+ # Check that index is set appropriately, also if only an output
+ # is passed on (the latter is another regression test for github bug 4753)
+ # This also checks implicitly that 'out' is always a tuple.
+ class CheckIndex(object):
+ def __array_ufunc__(self, ufunc, method, *inputs, **kw):
+ for i, a in enumerate(inputs):
+ if a is self:
+ return i
+ # calls below mean we must be in an output.
+ for j, a in enumerate(kw['out']):
+ if a is self:
+ return (j,)
+
+ a = CheckIndex()
+ dummy = np.arange(2.)
+ # 1 input, 1 output
+ assert_equal(np.sin(a), 0)
+ assert_equal(np.sin(dummy, a), (0,))
+ assert_equal(np.sin(dummy, out=a), (0,))
+ assert_equal(np.sin(dummy, out=(a,)), (0,))
+ assert_equal(np.sin(a, a), 0)
+ assert_equal(np.sin(a, out=a), 0)
+ assert_equal(np.sin(a, out=(a,)), 0)
+ # 1 input, 2 outputs
+ assert_equal(np.modf(dummy, a), (0,))
+ assert_equal(np.modf(dummy, None, a), (1,))
+ assert_equal(np.modf(dummy, dummy, a), (1,))
+ assert_equal(np.modf(dummy, out=(a, None)), (0,))
+ assert_equal(np.modf(dummy, out=(a, dummy)), (0,))
+ assert_equal(np.modf(dummy, out=(None, a)), (1,))
+ assert_equal(np.modf(dummy, out=(dummy, a)), (1,))
+ assert_equal(np.modf(a, out=(dummy, a)), 0)
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', DeprecationWarning)
+ assert_equal(np.modf(dummy, out=a), (0,))
+ assert_(w[0].category is DeprecationWarning)
+ assert_raises(ValueError, np.modf, dummy, out=(a,))
+
+ # 2 inputs, 1 output
+ assert_equal(np.add(a, dummy), 0)
+ assert_equal(np.add(dummy, a), 1)
+ assert_equal(np.add(dummy, dummy, a), (0,))
+ assert_equal(np.add(dummy, a, a), 1)
+ assert_equal(np.add(dummy, dummy, out=a), (0,))
+ assert_equal(np.add(dummy, dummy, out=(a,)), (0,))
+ assert_equal(np.add(a, dummy, out=a), 0)
+
+ def test_out_override(self):
+ # regression test for github bug 4753
+ class OutClass(np.ndarray):
+ def __array_ufunc__(self, ufunc, method, *inputs, **kw):
+ if 'out' in kw:
+ tmp_kw = kw.copy()
+ tmp_kw.pop('out')
+ func = getattr(ufunc, method)
+ kw['out'][0][...] = func(*inputs, **tmp_kw)
+
+ A = np.array([0]).view(OutClass)
+ B = np.array([5])
+ C = np.array([6])
+ np.multiply(C, B, A)
+ assert_equal(A[0], 30)
+ assert_(isinstance(A, OutClass))
+ A[0] = 0
+ np.multiply(C, B, out=A)
+ assert_equal(A[0], 30)
+ assert_(isinstance(A, OutClass))
+
+ def test_pow_override_with_errors(self):
+ # regression test for gh-9112
+ class PowerOnly(np.ndarray):
+ def __array_ufunc__(self, ufunc, method, *inputs, **kw):
+ if ufunc is not np.power:
+ raise NotImplementedError
+ return "POWER!"
+ # explicit cast to float, to ensure the fast power path is taken.
+ a = np.array(5., dtype=np.float64).view(PowerOnly)
+ assert_equal(a ** 2.5, "POWER!")
+ with assert_raises(NotImplementedError):
+ a ** 0.5
+ with assert_raises(NotImplementedError):
+ a ** 0
+ with assert_raises(NotImplementedError):
+ a ** 1
+ with assert_raises(NotImplementedError):
+ a ** -1
+ with assert_raises(NotImplementedError):
+ a ** 2
+
+ def test_pow_array_object_dtype(self):
+ # test pow on arrays of object dtype
+ class SomeClass(object):
+ def __init__(self, num=None):
+ self.num = num
+
+ # want to ensure a fast pow path is not taken
+ def __mul__(self, other):
+ raise AssertionError('__mul__ should not be called')
+
+ def __div__(self, other):
+ raise AssertionError('__div__ should not be called')
+
+ def __pow__(self, exp):
+ return SomeClass(num=self.num ** exp)
+
+ def __eq__(self, other):
+ if isinstance(other, SomeClass):
+ return self.num == other.num
+
+ __rpow__ = __pow__
+
+ def pow_for(exp, arr):
+ return np.array([x ** exp for x in arr])
+
+ obj_arr = np.array([SomeClass(1), SomeClass(2), SomeClass(3)])
+
+ assert_equal(obj_arr ** 0.5, pow_for(0.5, obj_arr))
+ assert_equal(obj_arr ** 0, pow_for(0, obj_arr))
+ assert_equal(obj_arr ** 1, pow_for(1, obj_arr))
+ assert_equal(obj_arr ** -1, pow_for(-1, obj_arr))
+ assert_equal(obj_arr ** 2, pow_for(2, obj_arr))
+
+ def test_pos_array_ufunc_override(self):
+ class A(np.ndarray):
+ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+ return getattr(ufunc, method)(*[i.view(np.ndarray) for
+ i in inputs], **kwargs)
+ tst = np.array('foo').view(A)
+ with assert_raises(TypeError):
+ +tst
+
+
+class TestTemporaryElide(object):
+ # elision is only triggered on relatively large arrays
+
+ def test_extension_incref_elide(self):
+ # test extension (e.g. cython) calling PyNumber_* slots without
+ # increasing the reference counts
+ #
+ # def incref_elide(a):
+ # d = input.copy() # refcount 1
+ # return d, d + d # PyNumber_Add without increasing refcount
+ from numpy.core._multiarray_tests import incref_elide
+ d = np.ones(100000)
+ orig, res = incref_elide(d)
+ d + d
+ # the returned original should not be changed by an in-place operation
+ assert_array_equal(orig, d)
+ assert_array_equal(res, d + d)
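+
+ # Sketch of the expression shape elision targets: in (big + big) + big
+ # the intermediate (big + big) has refcount 1, so its buffer may be
+ # reused for the second add instead of allocating a new array
+ # (illustrative only; whether elision fires depends on size and build).
+ big = np.ones(100000)
+ assert_array_equal((big + big) + big, 3 * big)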
+
+ def test_extension_incref_elide_stack(self):
+ # scanning whether the refcount == 1 object is on the python stack, to
+ # check that we are called directly from python, is flawed: the object
+ # may still be above the stack pointer, and we have no access to the
+ # top of it
+ #
+ # def incref_elide_l(d):
+ # return l[4] + l[4] # PyNumber_Add without increasing refcount
+ from numpy.core._multiarray_tests import incref_elide_l
+ # padding with 1 makes sure the object on the stack is not overwritten
+ l = [1, 1, 1, 1, np.ones(100000)]
+ res = incref_elide_l(l)
+ # the returned original should not be changed by an in-place operation
+ assert_array_equal(l[4], np.ones(100000))
+ assert_array_equal(res, l[4] + l[4])
+
+ def test_temporary_with_cast(self):
+ # check that we don't elide into a temporary which would need casting
+ d = np.ones(200000, dtype=np.int64)
+ assert_equal(((d + d) + 2**222).dtype, np.dtype('O'))
+
+ r = ((d + d) / 2)
+ assert_equal(r.dtype, np.dtype('f8'))
+
+ r = np.true_divide((d + d), 2)
+ assert_equal(r.dtype, np.dtype('f8'))
+
+ r = ((d + d) / 2.)
+ assert_equal(r.dtype, np.dtype('f8'))
+
+ r = ((d + d) // 2)
+ assert_equal(r.dtype, np.dtype(np.int64))
+
+ # commutative elision into the astype result
+ f = np.ones(100000, dtype=np.float32)
+ assert_equal(((f + f) + f.astype(np.float64)).dtype, np.dtype('f8'))
+
+ # no elision into lower type
+ d = f.astype(np.float64)
+ assert_equal(((f + f) + d).dtype, d.dtype)
+ l = np.ones(100000, dtype=np.longdouble)
+ assert_equal(((d + d) + l).dtype, l.dtype)
+
+ # test unary abs with different output dtype
+ for dt in (np.complex64, np.complex128, np.clongdouble):
+ c = np.ones(100000, dtype=dt)
+ r = abs(c * 2.0)
+ assert_equal(r.dtype, np.dtype('f%d' % (c.itemsize // 2)))
+
+ def test_elide_broadcast(self):
+ # test no elision on broadcast to higher dimension
+ # only triggers the elision code path in debug mode, as triggering it
+ # in normal mode needs a matching dimension of at least 256kb, i.e. a
+ # lot of memory
+ d = np.ones((2000, 1), dtype=int)
+ b = np.ones((2000), dtype=bool)
+ r = (1 - d) + b
+ assert_equal(r, 1)
+ assert_equal(r.shape, (2000, 2000))
+
+ def test_elide_scalar(self):
+ # check inplace op does not create ndarray from scalars
+ a = np.bool_()
+ assert_(type(~(a & a)) is np.bool_)
+
+ def test_elide_scalar_readonly(self):
+ # The imaginary part of a real array is readonly. This needs to go
+ # through fast_scalar_power which is only called for powers of
+ # +1, -1, 0, 0.5, and 2, so use 2. Also need valid refcount for
+ # elision which can be gotten for the imaginary part of a real
+ # array. Should not error.
+ a = np.empty(100000, dtype=np.float64)
+ a.imag ** 2
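+
+ # The special-cased exponents named above, checked directly (sketch):
+ # only +1, -1, 0, 0.5 and 2 take fast_scalar_power, but the results
+ # must agree with the generic np.power path either way.
+ base = np.arange(1, 5, dtype=np.float64)
+ for exp in (1, -1, 0, 0.5, 2):
+ assert_almost_equal(base ** exp, np.power(base, exp))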
+
+ def test_elide_readonly(self):
+ # don't try to elide readonly temporaries
+ r = np.asarray(np.broadcast_to(np.zeros(1), 100000).flat) * 0.0
+ assert_equal(r, 0)
+
+ def test_elide_updateifcopy(self):
+ a = np.ones(2**20)[::2]
+ b = a.flat.__array__() + 1
+ del b
+ assert_equal(a, 1)
+
+
+class TestCAPI(object):
+ def test_IsPythonScalar(self):
+ from numpy.core._multiarray_tests import IsPythonScalar
+ assert_(IsPythonScalar(b'foobar'))
+ assert_(IsPythonScalar(1))
+ assert_(IsPythonScalar(2**80))
+ assert_(IsPythonScalar(2.))
+ assert_(IsPythonScalar("a"))
+
+
+class TestSubscripting(object):
+ def test_test_zero_rank(self):
+ x = np.array([1, 2, 3])
+ assert_(isinstance(x[0], np.int_))
+ if sys.version_info[0] < 3:
+ assert_(isinstance(x[0], int))
+ assert_(type(x[0, ...]) is np.ndarray)
+
+
+class TestPickling(object):
+ def test_highest_available_pickle_protocol(self):
+ try:
+ import pickle5
+ except ImportError:
+ pickle5 = None
+
+ if sys.version_info[:2] >= (3, 8) or pickle5 is not None:
+ assert pickle.HIGHEST_PROTOCOL >= 5
+ else:
+ assert pickle.HIGHEST_PROTOCOL < 5
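+
+ # The import-fallback pattern this check mirrors (illustrative
+ # sketch; pickle_compat is a hypothetical local name):
+ try:
+ import pickle5 as pickle_compat
+ except ImportError:
+ import pickle as pickle_compat
+ assert pickle_compat.HIGHEST_PROTOCOL >= 2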
+
+ @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL >= 5,
+ reason=('this tests the error messages when trying to '
+ 'use protocol 5 although it is not available'))
+ def test_correct_protocol5_error_message(self):
+ array = np.arange(10)
+
+ if sys.version_info[:2] in ((3, 6), (3, 7)):
+ # For the specific case of python3.6 and 3.7, raise a clear import
+ # error about the pickle5 backport when trying to use protocol=5
+ # without the pickle5 package
+ with pytest.raises(ImportError):
+ array.__reduce_ex__(5)
+
+ elif sys.version_info[:2] < (3, 6):
+ # when calling __reduce_ex__ explicitly with protocol=5 on python
+ # versions older than 3.6, a ValueError is raised saying that
+ # protocol 5 is not available for this python version
+ with pytest.raises(ValueError):
+ array.__reduce_ex__(5)
+
+ def test_record_array_with_object_dtype(self):
+ my_object = object()
+
+ arr_with_object = np.array(
+ [(my_object, 1, 2.0)],
+ dtype=[('a', object), ('b', int), ('c', float)])
+ arr_without_object = np.array(
+ [('xxx', 1, 2.0)],
+ dtype=[('a', str), ('b', int), ('c', float)])
+
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ depickled_arr_with_object = pickle.loads(
+ pickle.dumps(arr_with_object, protocol=proto))
+ depickled_arr_without_object = pickle.loads(
+ pickle.dumps(arr_without_object, protocol=proto))
+
+ assert_equal(arr_with_object.dtype,
+ depickled_arr_with_object.dtype)
+ assert_equal(arr_without_object.dtype,
+ depickled_arr_without_object.dtype)
+
+ @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5,
+ reason="requires pickle protocol 5")
+ def test_f_contiguous_array(self):
+ f_contiguous_array = np.array([[1, 2, 3], [4, 5, 6]], order='F')
+ buffers = []
+
+ # When using pickle protocol 5, Fortran-contiguous arrays can be
+ # serialized using out-of-band buffers
+ bytes_string = pickle.dumps(f_contiguous_array, protocol=5,
+ buffer_callback=buffers.append)
+
+ assert len(buffers) > 0
+
+ depickled_f_contiguous_array = pickle.loads(bytes_string,
+ buffers=buffers)
+
+ assert_equal(f_contiguous_array, depickled_f_contiguous_array)
+
+ def test_non_contiguous_array(self):
+ non_contiguous_array = np.arange(12).reshape(3, 4)[:, :2]
+ assert not non_contiguous_array.flags.c_contiguous
+ assert not non_contiguous_array.flags.f_contiguous
+
+ # make sure non-contiguous arrays can be pickled-depickled
+ # using any protocol
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ depickled_non_contiguous_array = pickle.loads(
+ pickle.dumps(non_contiguous_array, protocol=proto))
+
+ assert_equal(non_contiguous_array, depickled_non_contiguous_array)
+
+ def test_roundtrip(self):
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ carray = np.array([[2, 9], [7, 0], [3, 8]])
+ DATA = [
+ carray,
+ np.transpose(carray),
+ np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int),
+ ('c', float)])
+ ]
+
+ refs = [weakref.ref(a) for a in DATA]
+ for a in DATA:
+ assert_equal(
+ a, pickle.loads(pickle.dumps(a, protocol=proto)),
+ err_msg="%r" % a)
+ del a, DATA, carray
+ gc.collect()
+ # check for reference leaks (gh-12793)
+ for ref in refs:
+ assert ref() is None
+
+ def _loads(self, obj):
+ if sys.version_info[0] >= 3:
+ return pickle.loads(obj, encoding='latin1')
+ else:
+ return pickle.loads(obj)
+
+ # version 0 pickles, using protocol=2 to pickle
+ # version 0 doesn't have a version field
+ def test_version0_int8(self):
+ s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
+ a = np.array([1, 2, 3, 4], dtype=np.int8)
+ p = self._loads(s)
+ assert_equal(a, p)
+
+ def test_version0_float32(self):
+ s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
+ a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
+ p = self._loads(s)
+ assert_equal(a, p)
+
+
+class TestStringCompare(object):
+ def test_string(self):
+ g1 = np.array(["This", "is", "example"])
+ g2 = np.array(["This", "was", "example"])
+ assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
+ assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
+ assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
+ assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
+ assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
+ assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
+
+ def test_mixed(self):
+ g1 = np.array(["spam", "spa", "spammer", "and eggs"])
+ g2 = "spam"
+ assert_array_equal(g1 == g2, [x == g2 for x in g1])
+ assert_array_equal(g1 != g2, [x != g2 for x in g1])
+ assert_array_equal(g1 < g2, [x < g2 for x in g1])
+ assert_array_equal(g1 > g2, [x > g2 for x in g1])
+ assert_array_equal(g1 <= g2, [x <= g2 for x in g1])
+ assert_array_equal(g1 >= g2, [x >= g2 for x in g1])
+
+ def test_unicode(self):
+ g1 = np.array([u"This", u"is", u"example"])
+ g2 = np.array([u"This", u"was", u"example"])
+ assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
+ assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
+ assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
+ assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
+ assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
+ assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
+
+
+class TestArgmax(object):
+
+ nan_arr = [
+ ([0, 1, 2, 3, np.nan], 4),
+ ([0, 1, 2, np.nan, 3], 3),
+ ([np.nan, 0, 1, 2, 3], 0),
+ ([np.nan, 0, np.nan, 2, 3], 0),
+ ([0, 1, 2, 3, complex(0, np.nan)], 4),
+ ([0, 1, 2, 3, complex(np.nan, 0)], 4),
+ ([0, 1, 2, complex(np.nan, 0), 3], 3),
+ ([0, 1, 2, complex(0, np.nan), 3], 3),
+ ([complex(0, np.nan), 0, 1, 2, 3], 0),
+ ([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
+ ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
+ ([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
+ ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
+
+ ([complex(0, 0), complex(0, 2), complex(0, 1)], 1),
+ ([complex(1, 0), complex(0, 2), complex(0, 1)], 0),
+ ([complex(1, 0), complex(0, 2), complex(1, 1)], 2),
+
+ ([np.datetime64('1923-04-14T12:43:12'),
+ np.datetime64('1994-06-21T14:43:15'),
+ np.datetime64('2001-10-15T04:10:32'),
+ np.datetime64('1995-11-25T16:02:16'),
+ np.datetime64('2005-01-04T03:14:12'),
+ np.datetime64('2041-12-03T14:05:03')], 5),
+ ([np.datetime64('1935-09-14T04:40:11'),
+ np.datetime64('1949-10-12T12:32:11'),
+ np.datetime64('2010-01-03T05:14:12'),
+ np.datetime64('2015-11-20T12:20:59'),
+ np.datetime64('1932-09-23T10:10:13'),
+ np.datetime64('2014-10-10T03:50:30')], 3),
+ # Assorted tests with NaTs
+ ([np.datetime64('NaT'),
+ np.datetime64('NaT'),
+ np.datetime64('2010-01-03T05:14:12'),
+ np.datetime64('NaT'),
+ np.datetime64('2015-09-23T10:10:13'),
+ np.datetime64('1932-10-10T03:50:30')], 4),
+ ([np.datetime64('2059-03-14T12:43:12'),
+ np.datetime64('1996-09-21T14:43:15'),
+ np.datetime64('NaT'),
+ np.datetime64('2022-12-25T16:02:16'),
+ np.datetime64('1963-10-04T03:14:12'),
+ np.datetime64('2013-05-08T18:15:23')], 0),
+ ([np.timedelta64(2, 's'),
+ np.timedelta64(1, 's'),
+ np.timedelta64('NaT', 's'),
+ np.timedelta64(3, 's')], 3),
+ ([np.timedelta64('NaT', 's')] * 3, 0),
+
+ ([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
+ timedelta(days=-1, seconds=23)], 0),
+ ([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
+ timedelta(days=5, seconds=14)], 1),
+ ([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
+ timedelta(days=10, seconds=43)], 2),
+
+ ([False, False, False, False, True], 4),
+ ([False, False, False, True, False], 3),
+ ([True, False, False, False, False], 0),
+ ([True, False, True, False, False], 0),
+ ]
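+
+ # In miniature, the rule the cases above encode (illustrative sketch):
+ # nan and NaT compare as maximal for argmax, and the first one wins:
+ # >>> np.argmax(np.array([0.0, np.nan, 2.0]))
+ # 1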
+
+ def test_all(self):
+ a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
+ for i in range(a.ndim):
+ amax = a.max(i)
+ aargmax = a.argmax(i)
+ axes = list(range(a.ndim))
+ axes.remove(i)
+ assert_(np.all(amax == aargmax.choose(*a.transpose(i,*axes))))
+
+ def test_combinations(self):
+ for arr, pos in self.nan_arr:
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning,
+ "invalid value encountered in reduce")
+ max_val = np.max(arr)
+
+ assert_equal(np.argmax(arr), pos, err_msg="%r" % arr)
+ assert_equal(arr[np.argmax(arr)], max_val, err_msg="%r" % arr)
+
+ def test_output_shape(self):
+ # see also gh-616
+ a = np.ones((10, 5))
+ # Check some simple shape mismatches
+ out = np.ones(11, dtype=np.int_)
+ assert_raises(ValueError, a.argmax, -1, out)
+
+ out = np.ones((2, 5), dtype=np.int_)
+ assert_raises(ValueError, a.argmax, -1, out)
+
+ # these could possibly be relaxed (older versions allowed even the shapes above)
+ out = np.ones((1, 10), dtype=np.int_)
+ assert_raises(ValueError, a.argmax, -1, out)
+
+ out = np.ones(10, dtype=np.int_)
+ a.argmax(-1, out=out)
+ assert_equal(out, a.argmax(-1))
+
+ def test_argmax_unicode(self):
+ d = np.zeros(6031, dtype='<U9')
+ d[5942] = "as"
+ assert_equal(d.argmax(), 5942)
+
+
+class TestClip(object):
+ def _check_range(self, x, cmin, cmax):
+ assert_(np.all(x >= cmin))
+ assert_(np.all(x <= cmax))
+
+ def _clip_type(self, type_group, array_max,
+ clip_min, clip_max, inplace=False,
+ expected_min=None, expected_max=None):
+ if expected_min is None:
+ expected_min = clip_min
+ if expected_max is None:
+ expected_max = clip_max
+
+ for T in np.sctypes[type_group]:
+ if sys.byteorder == 'little':
+ byte_orders = ['=', '>']
+ else:
+ byte_orders = ['<', '=']
+
+ for byteorder in byte_orders:
+ dtype = np.dtype(T).newbyteorder(byteorder)
+
+ x = (np.random.random(1000) * array_max).astype(dtype)
+ if inplace:
+ x.clip(clip_min, clip_max, x)
+ else:
+ x = x.clip(clip_min, clip_max)
+ byteorder = '='
+
+ if x.dtype.byteorder == '|':
+ byteorder = '|'
+ assert_equal(x.dtype.byteorder, byteorder)
+ self._check_range(x, expected_min, expected_max)
+ return x
+
+ def test_basic(self):
+ for inplace in [False, True]:
+ self._clip_type(
+ 'float', 1024, -12.8, 100.2, inplace=inplace)
+ self._clip_type(
+ 'float', 1024, 0, 0, inplace=inplace)
+
+ self._clip_type(
+ 'int', 1024, -120, 100.5, inplace=inplace)
+ self._clip_type(
+ 'int', 1024, 0, 0, inplace=inplace)
+
+ self._clip_type(
+ 'uint', 1024, 0, 0, inplace=inplace)
+ self._clip_type(
+ 'uint', 1024, -120, 100, inplace=inplace, expected_min=0)
+
+ def test_record_array(self):
+ rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
+ dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
+ y = rec['x'].clip(-0.3, 0.5)
+ self._check_range(y, -0.3, 0.5)
+
+ def test_max_or_min(self):
+ val = np.array([0, 1, 2, 3, 4, 5, 6, 7])
+ x = val.clip(3)
+ assert_(np.all(x >= 3))
+ x = val.clip(min=3)
+ assert_(np.all(x >= 3))
+ x = val.clip(max=4)
+ assert_(np.all(x <= 4))
+
+ def test_nan(self):
+ input_arr = np.array([-2., np.nan, 0.5, 3., 0.25, np.nan])
+ result = input_arr.clip(-1, 1)
+ expected = np.array([-1., np.nan, 0.5, 1., 0.25, np.nan])
+ assert_array_equal(result, expected)
+
+
+class TestCompress(object):
+ def test_axis(self):
+ tgt = [[5, 6, 7, 8, 9]]
+ arr = np.arange(10).reshape(2, 5)
+ out = np.compress([0, 1], arr, axis=0)
+ assert_equal(out, tgt)
+
+ tgt = [[1, 3], [6, 8]]
+ out = np.compress([0, 1, 0, 1, 0], arr, axis=1)
+ assert_equal(out, tgt)
+
+ def test_truncate(self):
+ tgt = [[1], [6]]
+ arr = np.arange(10).reshape(2, 5)
+ out = np.compress([0, 1], arr, axis=1)
+ assert_equal(out, tgt)
+
+ def test_flatten(self):
+ arr = np.arange(10).reshape(2, 5)
+ out = np.compress([0, 1], arr)
+ assert_equal(out, 1)
+
+
+class TestPutmask(object):
+ def tst_basic(self, x, T, mask, val):
+ np.putmask(x, mask, val)
+ assert_equal(x[mask], T(val))
+ assert_equal(x.dtype, T)
+
+ def test_ip_types(self):
+ unchecked_types = [bytes, unicode, np.void, object]
+
+ x = np.random.random(1000)*100
+ mask = x < 40
+
+ for val in [-100, 0, 15]:
+ for types in np.sctypes.values():
+ for T in types:
+ if T not in unchecked_types:
+ self.tst_basic(x.copy().astype(T), T, mask, val)
+
+ def test_mask_size(self):
+ assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5)
+
+ @pytest.mark.parametrize('dtype', ('>i4', '<i4'))
+ def test_byteorder(self, dtype):
+ x = np.array([1, 2, 3], dtype)
+ np.putmask(x, [True, False, True], -1)
+ assert_array_equal(x, [-1, 2, -1])
+
+
+class TestIO(object):
+ def test_largish_file(self):
+ # check the fallocate path on files > 16MB
+ d = np.zeros(4 * 1024 ** 2)
+ d.tofile(self.filename)
+ assert_equal(os.path.getsize(self.filename), d.nbytes)
+ assert_array_equal(d, np.fromfile(self.filename))
+ # check offset
+ with open(self.filename, "r+b") as f:
+ f.seek(d.nbytes)
+ d.tofile(f)
+ assert_equal(os.path.getsize(self.filename), d.nbytes * 2)
+ # check append mode (gh-8329)
+ open(self.filename, "w").close() # delete file contents
+ with open(self.filename, "ab") as f:
+ d.tofile(f)
+ assert_array_equal(d, np.fromfile(self.filename))
+ with open(self.filename, "ab") as f:
+ d.tofile(f)
+ assert_equal(os.path.getsize(self.filename), d.nbytes * 2)
+
+ def test_io_open_buffered_fromfile(self):
+ # gh-6632
+ self.x.tofile(self.filename)
+ with io.open(self.filename, 'rb', buffering=-1) as f:
+ y = np.fromfile(f, dtype=self.dtype)
+ assert_array_equal(y, self.x.flat)
+
+ def test_file_position_after_fromfile(self):
+ # gh-4118
+ sizes = [io.DEFAULT_BUFFER_SIZE//8,
+ io.DEFAULT_BUFFER_SIZE,
+ io.DEFAULT_BUFFER_SIZE*8]
+
+ for size in sizes:
+ f = open(self.filename, 'wb')
+ f.seek(size-1)
+ f.write(b'\0')
+ f.close()
+
+ for mode in ['rb', 'r+b']:
+ err_msg = "%d %s" % (size, mode)
+
+ f = open(self.filename, mode)
+ f.read(2)
+ np.fromfile(f, dtype=np.float64, count=1)
+ pos = f.tell()
+ f.close()
+ assert_equal(pos, 10, err_msg=err_msg)
+
+ def test_file_position_after_tofile(self):
+ # gh-4118
+ sizes = [io.DEFAULT_BUFFER_SIZE//8,
+ io.DEFAULT_BUFFER_SIZE,
+ io.DEFAULT_BUFFER_SIZE*8]
+
+ for size in sizes:
+ err_msg = "%d" % (size,)
+
+ f = open(self.filename, 'wb')
+ f.seek(size-1)
+ f.write(b'\0')
+ f.seek(10)
+ f.write(b'12')
+ np.array([0], dtype=np.float64).tofile(f)
+ pos = f.tell()
+ f.close()
+ assert_equal(pos, 10 + 2 + 8, err_msg=err_msg)
+
+ f = open(self.filename, 'r+b')
+ f.read(2)
+ f.seek(0, 1) # seek between read&write required by ANSI C
+ np.array([0], dtype=np.float64).tofile(f)
+ pos = f.tell()
+ f.close()
+ assert_equal(pos, 10, err_msg=err_msg)
+
+ def test_load_object_array_fromfile(self):
+ # gh-12300
+ with open(self.filename, 'w') as f:
+ # Ensure we have a file with consistent contents
+ pass
+
+ with open(self.filename, 'rb') as f:
+ assert_raises_regex(ValueError, "Cannot read into object array",
+ np.fromfile, f, dtype=object)
+
+ assert_raises_regex(ValueError, "Cannot read into object array",
+ np.fromfile, self.filename, dtype=object)
+
+ def _check_from(self, s, value, **kw):
+ if 'sep' not in kw:
+ y = np.frombuffer(s, **kw)
+ else:
+ y = np.fromstring(s, **kw)
+ assert_array_equal(y, value)
+
+ f = open(self.filename, 'wb')
+ f.write(s)
+ f.close()
+ y = np.fromfile(self.filename, **kw)
+ assert_array_equal(y, value)
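+
+ # The two parsing paths the helper drives, side by side (illustrative
+ # sketch, assuming a little-endian build):
+ # >>> np.frombuffer(b'\x01\x00\x02\x00', dtype=np.uint16) # binary
+ # array([1, 2], dtype=uint16)
+ # >>> np.fromstring('1 2', dtype=np.uint16, sep=' ') # text
+ # array([1, 2], dtype=uint16)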
+
+ def test_nan(self):
+ self._check_from(
+ b"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
+ [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
+ sep=' ')
+
+ def test_inf(self):
+ self._check_from(
+ b"inf +inf -inf infinity -Infinity iNfInItY -inF",
+ [np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf],
+ sep=' ')
+
+ def test_numbers(self):
+ self._check_from(b"1.234 -1.234 .3 .3e55 -123133.1231e+133",
+ [1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ')
+
+ def test_binary(self):
+ self._check_from(b'\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
+ np.array([1, 2, 3, 4]),
+ dtype='<f4')
+
+ @pytest.mark.slow # takes > 1 minute on mechanical hard drive
+ def test_big_binary(self):
+ """Test workarounds for 32-bit limited fwrite, fseek, and ftell
+ calls in windows. These normally would hang doing something like this.
+ See http://projects.scipy.org/numpy/ticket/1660"""
+ if sys.platform != 'win32':
+ return
+ try:
+ # before workarounds, only up to 2**32-1 worked
+ fourgbplus = 2**32 + 2**16
+ testbytes = np.arange(8, dtype=np.int8)
+ n = len(testbytes)
+ flike = tempfile.NamedTemporaryFile()
+ f = flike.file
+ np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f)
+ flike.seek(0)
+ a = np.fromfile(f, dtype=np.int8)
+ flike.close()
+ assert_(len(a) == fourgbplus)
+ # check only start and end for speed:
+ assert_((a[:n] == testbytes).all())
+ assert_((a[-n:] == testbytes).all())
+ except (MemoryError, ValueError):
+ pass
+
+ def test_string(self):
+ self._check_from(b'1,2,3,4', [1., 2., 3., 4.], sep=',')
+
+ def test_counted_string(self):
+ self._check_from(b'1,2,3,4', [1., 2., 3., 4.], count=4, sep=',')
+ self._check_from(b'1,2,3,4', [1., 2., 3.], count=3, sep=',')
+ self._check_from(b'1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',')
+
+ def test_string_with_ws(self):
+ self._check_from(b'1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ')
+
+ def test_counted_string_with_ws(self):
+ self._check_from(b'1 2 3 4 ', [1, 2, 3], count=3, dtype=int,
+ sep=' ')
+
+ def test_ascii(self):
+ self._check_from(b'1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',')
+ self._check_from(b'1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',')
+
+ def test_malformed(self):
+ self._check_from(b'1.234 1,234', [1.234, 1.], sep=' ')
+
+ def test_long_sep(self):
+ self._check_from(b'1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_')
+
+ def test_dtype(self):
+ v = np.array([1, 2, 3, 4], dtype=np.int_)
+ self._check_from(b'1,2,3,4', v, sep=',', dtype=np.int_)
+
+ def test_dtype_bool(self):
+ # can't use _check_from because fromstring can't handle True/False
+ v = np.array([True, False, True, False], dtype=np.bool_)
+ s = b'1,0,-2.3,0'
+ f = open(self.filename, 'wb')
+ f.write(s)
+ f.close()
+ y = np.fromfile(self.filename, sep=',', dtype=np.bool_)
+ assert_(y.dtype == '?')
+ assert_array_equal(y, v)
+
+ def test_tofile_sep(self):
+ x = np.array([1.51, 2, 3.51, 4], dtype=float)
+ f = open(self.filename, 'w')
+ x.tofile(f, sep=',')
+ f.close()
+ f = open(self.filename, 'r')
+ s = f.read()
+ f.close()
+ #assert_equal(s, '1.51,2.0,3.51,4.0')
+ y = np.array([float(p) for p in s.split(',')])
+ assert_array_equal(x,y)
+
+ def test_tofile_format(self):
+ x = np.array([1.51, 2, 3.51, 4], dtype=float)
+ f = open(self.filename, 'w')
+ x.tofile(f, sep=',', format='%.2f')
+ f.close()
+ f = open(self.filename, 'r')
+ s = f.read()
+ f.close()
+ assert_equal(s, '1.51,2.00,3.51,4.00')
+
+ def test_locale(self):
+ with CommaDecimalPointLocale():
+ self.test_numbers()
+ self.test_nan()
+ self.test_inf()
+ self.test_counted_string()
+ self.test_ascii()
+ self.test_malformed()
+ self.test_tofile_sep()
+ self.test_tofile_format()
+
+
+class TestFromBuffer(object):
+ @pytest.mark.parametrize('byteorder', ['<', '>'])
+ @pytest.mark.parametrize('dtype', [float, int, complex])
+ def test_basic(self, byteorder, dtype):
+ dt = np.dtype(dtype).newbyteorder(byteorder)
+ x = (np.random.random((4, 7)) * 5).astype(dt)
+ buf = x.tobytes()
+ assert_array_equal(np.frombuffer(buf, dtype=dt), x.flat)
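+
+ # Design note, checked directly (sketch): frombuffer wraps the
+ # buffer without copying, so an array over an immutable bytes
+ # object comes back read-only.
+ assert_(not np.frombuffer(b'\x00' * 8, dtype=np.float64).flags.writeable)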
+
+ def test_empty(self):
+ assert_array_equal(np.frombuffer(b''), np.array([]))
+
+
+class TestFlat(object):
+ def setup(self):
+ a0 = np.arange(20.0)
+ a = a0.reshape(4, 5)
+ a0.shape = (4, 5)
+ a.flags.writeable = False
+ self.a = a
+ self.b = a[::2, ::2]
+ self.a0 = a0
+ self.b0 = a0[::2, ::2]
+
+ def test_contiguous(self):
+ testpassed = False
+ try:
+ self.a.flat[12] = 100.0
+ except ValueError:
+ testpassed = True
+ assert_(testpassed)
+ assert_(self.a.flat[12] == 12.0)
+
+ def test_discontiguous(self):
+ testpassed = False
+ try:
+ self.b.flat[4] = 100.0
+ except ValueError:
+ testpassed = True
+ assert_(testpassed)
+ assert_(self.b.flat[4] == 12.0)
+
+ def test___array__(self):
+ c = self.a.flat.__array__()
+ d = self.b.flat.__array__()
+ e = self.a0.flat.__array__()
+ f = self.b0.flat.__array__()
+
+ assert_(c.flags.writeable is False)
+ assert_(d.flags.writeable is False)
+ # for 1.14 all are set to non-writeable on the way to replacing the
+ # UPDATEIFCOPY array returned for non-contiguous arrays.
+ assert_(e.flags.writeable is True)
+ assert_(f.flags.writeable is False)
+ with assert_warns(DeprecationWarning):
+ assert_(c.flags.updateifcopy is False)
+ with assert_warns(DeprecationWarning):
+ assert_(d.flags.updateifcopy is False)
+ with assert_warns(DeprecationWarning):
+ assert_(e.flags.updateifcopy is False)
+ with assert_warns(DeprecationWarning):
+ # UPDATEIFCOPY is removed.
+ assert_(f.flags.updateifcopy is False)
+ assert_(c.flags.writebackifcopy is False)
+ assert_(d.flags.writebackifcopy is False)
+ assert_(e.flags.writebackifcopy is False)
+ assert_(f.flags.writebackifcopy is False)
+
+
+class TestResize(object):
+ def test_basic(self):
+ x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
+ if IS_PYPY:
+ x.resize((5, 5), refcheck=False)
+ else:
+ x.resize((5, 5))
+ assert_array_equal(x.flat[:9],
+ np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat)
+ assert_array_equal(x[9:].flat, 0)
+
+ def test_check_reference(self):
+ x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
+ y = x
+ assert_raises(ValueError, x.resize, (5, 1))
+ del y # avoid pyflakes unused variable warning.
+
+ def test_int_shape(self):
+ x = np.eye(3)
+ if IS_PYPY:
+ x.resize(3, refcheck=False)
+ else:
+ x.resize(3)
+ assert_array_equal(x, np.eye(3)[0,:])
+
+ def test_none_shape(self):
+ x = np.eye(3)
+ x.resize(None)
+ assert_array_equal(x, np.eye(3))
+ x.resize()
+ assert_array_equal(x, np.eye(3))
+
+ def test_0d_shape(self):
+ # do it multiple times to test it does not break the alloc cache, gh-9216
+ for i in range(10):
+ x = np.empty((1,))
+ x.resize(())
+ assert_equal(x.shape, ())
+ assert_equal(x.size, 1)
+ x = np.empty(())
+ x.resize((1,))
+ assert_equal(x.shape, (1,))
+ assert_equal(x.size, 1)
+
+ def test_invalid_arguments(self):
+ assert_raises(TypeError, np.eye(3).resize, 'hi')
+ assert_raises(ValueError, np.eye(3).resize, -1)
+ assert_raises(TypeError, np.eye(3).resize, order=1)
+ assert_raises(TypeError, np.eye(3).resize, refcheck='hi')
+
+ def test_freeform_shape(self):
+ x = np.eye(3)
+ if IS_PYPY:
+ x.resize(3, 2, 1, refcheck=False)
+ else:
+ x.resize(3, 2, 1)
+ assert_(x.shape == (3, 2, 1))
+
+ def test_zeros_appended(self):
+ x = np.eye(3)
+ if IS_PYPY:
+ x.resize(2, 3, 3, refcheck=False)
+ else:
+ x.resize(2, 3, 3)
+ assert_array_equal(x[0], np.eye(3))
+ assert_array_equal(x[1], np.zeros((3, 3)))
+
+ def test_obj_obj(self):
+ # check memory is initialized on resize, gh-4857
+ a = np.ones(10, dtype=[('k', object, 2)])
+ if IS_PYPY:
+ a.resize(15, refcheck=False)
+ else:
+ a.resize(15,)
+ assert_equal(a.shape, (15,))
+ assert_array_equal(a['k'][-5:], 0)
+ assert_array_equal(a['k'][:-5], 1)
+
+ def test_empty_view(self):
+ # check that sizes containing a zero don't trigger a reallocate for
+ # already empty arrays
+ x = np.zeros((10, 0), int)
+ x_view = x[...]
+ x_view.resize((0, 10))
+ x_view.resize((0, 100))
+
+ def test_check_weakref(self):
+ x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
+ xref = weakref.ref(x)
+ assert_raises(ValueError, x.resize, (5, 1))
+ del xref # avoid pyflakes unused variable warning.
+
+
+class TestRecord(object):
+ def test_field_rename(self):
+ dt = np.dtype([('f', float), ('i', int)])
+ dt.names = ['p', 'q']
+ assert_equal(dt.names, ['p', 'q'])
+
+ def test_multiple_field_name_occurrence(self):
+ def test_dtype_init():
+ np.dtype([("A", "f8"), ("B", "f8"), ("A", "f8")])
+
+ # Error raised when multiple fields have the same name
+ assert_raises(ValueError, test_dtype_init)
+
+ @pytest.mark.skipif(sys.version_info[0] < 3, reason="Not Python 3")
+ def test_bytes_fields(self):
+ # Bytes are not allowed in field names and not recognized in titles
+ # on Py3
+ assert_raises(TypeError, np.dtype, [(b'a', int)])
+ assert_raises(TypeError, np.dtype, [(('b', b'a'), int)])
+
+ dt = np.dtype([((b'a', 'b'), int)])
+ assert_raises(TypeError, dt.__getitem__, b'a')
+
+ x = np.array([(1,), (2,), (3,)], dtype=dt)
+ assert_raises(IndexError, x.__getitem__, b'a')
+
+ y = x[0]
+ assert_raises(IndexError, y.__getitem__, b'a')
+
+ @pytest.mark.skipif(sys.version_info[0] < 3, reason="Not Python 3")
+ def test_multiple_field_name_unicode(self):
+ def test_dtype_unicode():
+ np.dtype([("\u20B9", "f8"), ("B", "f8"), ("\u20B9", "f8")])
+
+ # Error raised when multiple fields have the same name (unicode included)
+ assert_raises(ValueError, test_dtype_unicode)
+
+ @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2")
+ def test_unicode_field_titles(self):
+ # Unicode field titles are added to field dict on Py2
+ title = u'b'
+ dt = np.dtype([((title, 'a'), int)])
+ dt[title]
+ dt['a']
+ x = np.array([(1,), (2,), (3,)], dtype=dt)
+ x[title]
+ x['a']
+ y = x[0]
+ y[title]
+ y['a']
+
+ @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2")
+ def test_unicode_field_names(self):
+ # Unicode field names are converted to ascii on Python 2:
+ encodable_name = u'b'
+ assert_equal(np.dtype([(encodable_name, int)]).names[0], b'b')
+ assert_equal(np.dtype([(('a', encodable_name), int)]).names[0], b'b')
+
+ # But raises UnicodeEncodeError if it can't be encoded:
+ nonencodable_name = u'\uc3bc'
+ assert_raises(UnicodeEncodeError, np.dtype, [(nonencodable_name, int)])
+ assert_raises(UnicodeEncodeError, np.dtype, [(('a', nonencodable_name), int)])
+
+ def test_fromarrays_unicode(self):
+ # A single name string provided to fromarrays() is allowed to be unicode
+ # on both Python 2 and 3:
+ x = np.core.records.fromarrays([[0], [1]], names=u'a,b', formats=u'i4,i4')
+ assert_equal(x['a'][0], 0)
+ assert_equal(x['b'][0], 1)
+
+ def test_unicode_order(self):
+ # Test that we can sort with order as a unicode field name in both Python 2 and
+ # 3:
+ name = u'b'
+ x = np.array([1, 3, 2], dtype=[(name, int)])
+ x.sort(order=name)
+ assert_equal(x[u'b'], np.array([1, 2, 3]))
+
+ def test_field_names(self):
+ # Test unicode and 8-bit / byte strings can be used
+ a = np.zeros((1,), dtype=[('f1', 'i4'),
+ ('f2', 'i4'),
+ ('f3', [('sf1', 'i4')])])
+ is_py3 = sys.version_info[0] >= 3
+ if is_py3:
+ funcs = (str,)
+ # byte string indexing fails gracefully
+ assert_raises(IndexError, a.__setitem__, b'f1', 1)
+ assert_raises(IndexError, a.__getitem__, b'f1')
+ assert_raises(IndexError, a['f1'].__setitem__, b'sf1', 1)
+ assert_raises(IndexError, a['f1'].__getitem__, b'sf1')
+ else:
+ funcs = (str, unicode)
+ for func in funcs:
+ b = a.copy()
+ fn1 = func('f1')
+ b[fn1] = 1
+ assert_equal(b[fn1], 1)
+ fnn = func('not at all')
+ assert_raises(ValueError, b.__setitem__, fnn, 1)
+ assert_raises(ValueError, b.__getitem__, fnn)
+ b[0][fn1] = 2
+ assert_equal(b[fn1], 2)
+ # Subfield
+ assert_raises(ValueError, b[0].__setitem__, fnn, 1)
+ assert_raises(ValueError, b[0].__getitem__, fnn)
+ # Subfield
+ fn3 = func('f3')
+ sfn1 = func('sf1')
+ b[fn3][sfn1] = 1
+ assert_equal(b[fn3][sfn1], 1)
+ assert_raises(ValueError, b[fn3].__setitem__, fnn, 1)
+ assert_raises(ValueError, b[fn3].__getitem__, fnn)
+ # multiple subfields
+ fn2 = func('f2')
+ b[fn2] = 3
+
+ assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
+ assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
+ assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
+
+ # non-ascii unicode field indexing is well behaved
+ if not is_py3:
+ pytest.skip('non ascii unicode field indexing skipped; '
+ 'raises segfault on python 2.x')
+ else:
+ assert_raises(ValueError, a.__setitem__, u'\u03e0', 1)
+ assert_raises(ValueError, a.__getitem__, u'\u03e0')
+
+ def test_record_hash(self):
+ a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
+ a.flags.writeable = False
+ b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')])
+ b.flags.writeable = False
+ c = np.array([(1, 2), (3, 4)], dtype='i1,i2')
+ c.flags.writeable = False
+ assert_(hash(a[0]) == hash(a[1]))
+ assert_(hash(a[0]) == hash(b[0]))
+ assert_(hash(a[0]) != hash(b[1]))
+ assert_(hash(c[0]) == hash(a[0]) and c[0] == a[0])
+
+ def test_record_no_hash(self):
+ a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
+ assert_raises(TypeError, hash, a[0])
+
+ def test_empty_structure_creation(self):
+ # make sure these do not raise errors (gh-5631)
+ np.array([()], dtype={'names': [], 'formats': [],
+ 'offsets': [], 'itemsize': 12})
+ np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [],
+ 'offsets': [], 'itemsize': 12})
+
+ def test_multifield_indexing_view(self):
+ a = np.ones(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u4')])
+ v = a[['a', 'c']]
+ assert_(v.base is a)
+ assert_(v.dtype == np.dtype({'names': ['a', 'c'],
+ 'formats': ['i4', 'u4'],
+ 'offsets': [0, 8]}))
+ v[:] = (4,5)
+ assert_equal(a[0].item(), (4, 1, 5))
+
+class TestView(object):
+ def test_basic(self):
+ x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)],
+ dtype=[('r', np.int8), ('g', np.int8),
+ ('b', np.int8), ('a', np.int8)])
+ # We must be specific about the endianness here:
+ y = x.view(dtype='<i4')
+ # ... and again without the keyword.
+ z = x.view('<i4')
+ assert_array_equal(y, z)
+ assert_equal(y[0], 67305985)
+
+ assert_(len(w) > 0)
+ assert_(issubclass(w[0].category, RuntimeWarning))
+
+ def test_empty(self):
+ A = np.zeros((0, 3))
+ for f in self.funcs:
+ for axis in [0, None]:
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ assert_(np.isnan(f(A, axis=axis)).all())
+ assert_(len(w) > 0)
+ assert_(issubclass(w[0].category, RuntimeWarning))
+ for axis in [1]:
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ assert_equal(f(A, axis=axis), np.zeros([]))
+
+ def test_mean_values(self):
+ for mat in [self.rmat, self.cmat, self.omat]:
+ for axis in [0, 1]:
+ tgt = mat.sum(axis=axis)
+ res = _mean(mat, axis=axis) * mat.shape[axis]
+ assert_almost_equal(res, tgt)
+ for axis in [None]:
+ tgt = mat.sum(axis=axis)
+ res = _mean(mat, axis=axis) * np.prod(mat.shape)
+ assert_almost_equal(res, tgt)
+
+ def test_mean_float16(self):
+ # This fails if the sum inside mean is done in float16 instead
+ # of float32.
+ assert_(_mean(np.ones(100000, dtype='float16')) == 1)
+
+ def test_var_values(self):
+ for mat in [self.rmat, self.cmat, self.omat]:
+ for axis in [0, 1, None]:
+ msqr = _mean(mat * mat.conj(), axis=axis)
+ mean = _mean(mat, axis=axis)
+ tgt = msqr - mean * mean.conjugate()
+ res = _var(mat, axis=axis)
+ assert_almost_equal(res, tgt)
+
+ def test_std_values(self):
+ for mat in [self.rmat, self.cmat, self.omat]:
+ for axis in [0, 1, None]:
+ tgt = np.sqrt(_var(mat, axis=axis))
+ res = _std(mat, axis=axis)
+ assert_almost_equal(res, tgt)
+
+ def test_subclass(self):
+ class TestArray(np.ndarray):
+ def __new__(cls, data, info):
+ result = np.array(data)
+ result = result.view(cls)
+ result.info = info
+ return result
+
+ def __array_finalize__(self, obj):
+ self.info = getattr(obj, "info", '')
+
+ dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
+ res = dat.mean(1)
+ assert_(res.info == dat.info)
+ res = dat.std(1)
+ assert_(res.info == dat.info)
+ res = dat.var(1)
+ assert_(res.info == dat.info)
+
+class TestVdot(object):
+ def test_basic(self):
+ dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger']
+ dt_complex = np.typecodes['Complex']
+
+ # test real
+ a = np.eye(3)
+ for dt in dt_numeric + 'O':
+ b = a.astype(dt)
+ res = np.vdot(b, b)
+ assert_(np.isscalar(res))
+ assert_equal(np.vdot(b, b), 3)
+
+ # test complex
+ a = np.eye(3) * 1j
+ for dt in dt_complex + 'O':
+ b = a.astype(dt)
+ res = np.vdot(b, b)
+ assert_(np.isscalar(res))
+ assert_equal(np.vdot(b, b), 3)
+
+ # test boolean
+ b = np.eye(3, dtype=bool)
+ res = np.vdot(b, b)
+ assert_(np.isscalar(res))
+ assert_equal(np.vdot(b, b), True)
+
+ def test_vdot_array_order(self):
+ a = np.array([[1, 2], [3, 4]], order='C')
+ b = np.array([[1, 2], [3, 4]], order='F')
+ res = np.vdot(a, a)
+
+ # integer arrays are exact
+ assert_equal(np.vdot(a, b), res)
+ assert_equal(np.vdot(b, a), res)
+ assert_equal(np.vdot(b, b), res)
+
+ def test_vdot_uncontiguous(self):
+ for size in [2, 1000]:
+ # Different sizes match different branches in vdot.
+ a = np.zeros((size, 2, 2))
+ b = np.zeros((size, 2, 2))
+ a[:, 0, 0] = np.arange(size)
+ b[:, 0, 0] = np.arange(size) + 1
+ # Make a and b uncontiguous:
+ a = a[..., 0]
+ b = b[..., 0]
+
+ assert_equal(np.vdot(a, b),
+ np.vdot(a.flatten(), b.flatten()))
+ assert_equal(np.vdot(a, b.copy()),
+ np.vdot(a.flatten(), b.flatten()))
+ assert_equal(np.vdot(a.copy(), b),
+ np.vdot(a.flatten(), b.flatten()))
+ assert_equal(np.vdot(a.copy('F'), b),
+ np.vdot(a.flatten(), b.flatten()))
+ assert_equal(np.vdot(a, b.copy('F')),
+ np.vdot(a.flatten(), b.flatten()))
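+
+ # What vdot computes, spelled out (sketch): both arguments are
+ # flattened and the first is conjugated, i.e. it matches
+ # np.dot(a.conj().ravel(), b.ravel()).
+ assert_equal(np.vdot(a, b),
+ np.dot(a.conj().ravel(), b.ravel()))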
+
+
+class TestDot(object):
+ def setup(self):
+ np.random.seed(128)
+ self.A = np.random.rand(4, 2)
+ self.b1 = np.random.rand(2, 1)
+ self.b2 = np.random.rand(2)
+ self.b3 = np.random.rand(1, 2)
+ self.b4 = np.random.rand(4)
+ self.N = 7
+
+ def test_dotmatmat(self):
+ A = self.A
+ res = np.dot(A.transpose(), A)
+ tgt = np.array([[1.45046013, 0.86323640],
+ [0.86323640, 0.84934569]])
+ assert_almost_equal(res, tgt, decimal=self.N)
+
+ def test_dotmatvec(self):
+ A, b1 = self.A, self.b1
+ res = np.dot(A, b1)
+ tgt = np.array([[0.32114320], [0.04889721],
+ [0.15696029], [0.33612621]])
+ assert_almost_equal(res, tgt, decimal=self.N)
+
+ def test_dotmatvec2(self):
+ A, b2 = self.A, self.b2
+ res = np.dot(A, b2)
+ tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293])
+ assert_almost_equal(res, tgt, decimal=self.N)
+
+ def test_dotvecmat(self):
+ A, b4 = self.A, self.b4
+ res = np.dot(b4, A)
+ tgt = np.array([1.23495091, 1.12222648])
+ assert_almost_equal(res, tgt, decimal=self.N)
+
+ def test_dotvecmat2(self):
+ b3, A = self.b3, self.A
+ res = np.dot(b3, A.transpose())
+ tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]])
+ assert_almost_equal(res, tgt, decimal=self.N)
+
+ def test_dotvecmat3(self):
+ A, b4 = self.A, self.b4
+ res = np.dot(A.transpose(), b4)
+ tgt = np.array([1.23495091, 1.12222648])
+ assert_almost_equal(res, tgt, decimal=self.N)
+
+ def test_dotvecvecouter(self):
+ b1, b3 = self.b1, self.b3
+ res = np.dot(b1, b3)
+ tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]])
+ assert_almost_equal(res, tgt, decimal=self.N)
+
+ def test_dotvecvecinner(self):
+ b1, b3 = self.b1, self.b3
+ res = np.dot(b3, b1)
+ tgt = np.array([[ 0.23129668]])
+ assert_almost_equal(res, tgt, decimal=self.N)
+
+ def test_dotcolumnvect1(self):
+ b1 = np.ones((3, 1))
+ b2 = [5.3]
+ res = np.dot(b1, b2)
+ tgt = np.array([5.3, 5.3, 5.3])
+ assert_almost_equal(res, tgt, decimal=self.N)
+
+ def test_dotcolumnvect2(self):
+ b1 = np.ones((3, 1)).transpose()
+ b2 = [6.2]
+ res = np.dot(b2, b1)
+ tgt = np.array([6.2, 6.2, 6.2])
+ assert_almost_equal(res, tgt, decimal=self.N)
+
+ def test_dotvecscalar(self):
+ np.random.seed(100)
+ b1 = np.random.rand(1, 1)
+ b2 = np.random.rand(1, 4)
+ res = np.dot(b1, b2)
+ tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]])
+ assert_almost_equal(res, tgt, decimal=self.N)
+
+ def test_dotvecscalar2(self):
+ np.random.seed(100)
+ b1 = np.random.rand(4, 1)
+ b2 = np.random.rand(1, 1)
+ res = np.dot(b1, b2)
+ tgt = np.array([[0.00256425],[0.00131359],[0.00200324],[ 0.00398638]])
+ assert_almost_equal(res, tgt, decimal=self.N)
+
+ def test_all(self):
+ dims = [(), (1,), (1, 1)]
+ dout = [(), (1,), (1, 1), (1,), (), (1,), (1, 1), (1,), (1, 1)]
+ for dim, (dim1, dim2) in zip(dout, itertools.product(dims, dims)):
+ b1 = np.zeros(dim1)
+ b2 = np.zeros(dim2)
+ res = np.dot(b1, b2)
+ tgt = np.zeros(dim)
+ assert_(res.shape == tgt.shape)
+ assert_almost_equal(res, tgt, decimal=self.N)
+
+ def test_vecobject(self):
+ class Vec(object):
+ def __init__(self, sequence=None):
+ if sequence is None:
+ sequence = []
+ self.array = np.array(sequence)
+
+ def __add__(self, other):
+ out = Vec()
+ out.array = self.array + other.array
+ return out
+
+ def __sub__(self, other):
+ out = Vec()
+ out.array = self.array - other.array
+ return out
+
+ def __mul__(self, other): # with scalar
+ out = Vec(self.array.copy())
+ out.array *= other
+ return out
+
+ def __rmul__(self, other):
+ return self*other
+
+ U_non_cont = np.transpose([[1., 1.], [1., 2.]])
+ U_cont = np.ascontiguousarray(U_non_cont)
+ x = np.array([Vec([1., 0.]), Vec([0., 1.])])
+ zeros = np.array([Vec([0., 0.]), Vec([0., 0.])])
+ zeros_test = np.dot(U_cont, x) - np.dot(U_non_cont, x)
+ assert_equal(zeros[0].array, zeros_test[0].array)
+ assert_equal(zeros[1].array, zeros_test[1].array)
+
+ def test_dot_2args(self):
+ from numpy.core.multiarray import dot
+
+ a = np.array([[1, 2], [3, 4]], dtype=float)
+ b = np.array([[1, 0], [1, 1]], dtype=float)
+ c = np.array([[3, 2], [7, 4]], dtype=float)
+
+ d = dot(a, b)
+ assert_allclose(c, d)
+
+ def test_dot_3args(self):
+ from numpy.core.multiarray import dot
+
+ np.random.seed(22)
+ f = np.random.random_sample((1024, 16))
+ v = np.random.random_sample((16, 32))
+
+ r = np.empty((1024, 32))
+ for i in range(12):
+ dot(f, v, r)
+ if HAS_REFCOUNT:
+ assert_equal(sys.getrefcount(r), 2)
+ r2 = dot(f, v, out=None)
+ assert_array_equal(r2, r)
+ assert_(r is dot(f, v, out=r))
+
+ v = v[:, 0].copy() # v.shape == (16,)
+ r = r[:, 0].copy() # r.shape == (1024,)
+ r2 = dot(f, v)
+ assert_(r is dot(f, v, r))
+ assert_array_equal(r2, r)
+
+ def test_dot_3args_errors(self):
+ from numpy.core.multiarray import dot
+
+ np.random.seed(22)
+ f = np.random.random_sample((1024, 16))
+ v = np.random.random_sample((16, 32))
+
+ r = np.empty((1024, 31))
+ assert_raises(ValueError, dot, f, v, r)
+
+ r = np.empty((1024,))
+ assert_raises(ValueError, dot, f, v, r)
+
+ r = np.empty((32,))
+ assert_raises(ValueError, dot, f, v, r)
+
+ r = np.empty((32, 1024))
+ assert_raises(ValueError, dot, f, v, r)
+ assert_raises(ValueError, dot, f, v, r.T)
+
+ r = np.empty((1024, 64))
+ assert_raises(ValueError, dot, f, v, r[:, ::2])
+ assert_raises(ValueError, dot, f, v, r[:, :32])
+
+ r = np.empty((1024, 32), dtype=np.float32)
+ assert_raises(ValueError, dot, f, v, r)
+
+ r = np.empty((1024, 32), dtype=int)
+ assert_raises(ValueError, dot, f, v, r)
+
+ def test_dot_array_order(self):
+ a = np.array([[1, 2], [3, 4]], order='C')
+ b = np.array([[1, 2], [3, 4]], order='F')
+ res = np.dot(a, a)
+
+ # integer arrays are exact
+ assert_equal(np.dot(a, b), res)
+ assert_equal(np.dot(b, a), res)
+ assert_equal(np.dot(b, b), res)
+
+ def test_accelerate_framework_sgemv_fix(self):
+
+ def aligned_array(shape, align, dtype, order='C'):
+ d = dtype(0)
+ N = np.prod(shape)
+ tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8)
+ address = tmp.__array_interface__["data"][0]
+ for offset in range(align):
+ if (address + offset) % align == 0:
+ break
+ tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype)
+ return tmp.reshape(shape, order=order)
+
+ def as_aligned(arr, align, dtype, order='C'):
+ aligned = aligned_array(arr.shape, align, dtype, order)
+ aligned[:] = arr[:]
+ return aligned
+
+ def assert_dot_close(A, X, desired):
+ assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7)
+
+ m = aligned_array(100, 15, np.float32)
+ s = aligned_array((100, 100), 15, np.float32)
+ np.dot(s, m) # this will always segfault if the bug is present
+
+ testdata = itertools.product((15,32), (10000,), (200,89), ('C','F'))
+ for align, m, n, a_order in testdata:
+ # Calculation in double precision
+ A_d = np.random.rand(m, n)
+ X_d = np.random.rand(n)
+ desired = np.dot(A_d, X_d)
+ # Calculation with aligned single precision
+ A_f = as_aligned(A_d, align, np.float32, order=a_order)
+ X_f = as_aligned(X_d, align, np.float32)
+ assert_dot_close(A_f, X_f, desired)
+ # Strided A rows
+ A_d_2 = A_d[::2]
+ desired = np.dot(A_d_2, X_d)
+ A_f_2 = A_f[::2]
+ assert_dot_close(A_f_2, X_f, desired)
+ # Strided A columns, strided X vector
+ A_d_22 = A_d_2[:, ::2]
+ X_d_2 = X_d[::2]
+ desired = np.dot(A_d_22, X_d_2)
+ A_f_22 = A_f_2[:, ::2]
+ X_f_2 = X_f[::2]
+ assert_dot_close(A_f_22, X_f_2, desired)
+ # Check the strides are as expected
+ if a_order == 'F':
+ assert_equal(A_f_22.strides, (8, 8 * m))
+ else:
+ assert_equal(A_f_22.strides, (8 * n, 8))
+ assert_equal(X_f_2.strides, (8,))
+ # Strides in A rows + cols only
+ X_f_2c = as_aligned(X_f_2, align, np.float32)
+ assert_dot_close(A_f_22, X_f_2c, desired)
+ # Strides just in A cols
+ A_d_12 = A_d[:, ::2]
+ desired = np.dot(A_d_12, X_d_2)
+ A_f_12 = A_f[:, ::2]
+ assert_dot_close(A_f_12, X_f_2c, desired)
+ # Strides in A cols and X
+ assert_dot_close(A_f_12, X_f_2, desired)
+
+
+class MatmulCommon(object):
+ """Common tests for '@' operator and numpy.matmul.
+
+ """
+ # Should work with these types. Will want to add
+ # "O" at some point
+ types = "?bhilqBHILQefdgFDG"
+
+ def test_exceptions(self):
+ dims = [
+ ((1,), (2,)), # mismatched vector vector
+ ((2, 1,), (2,)), # mismatched matrix vector
+ ((2,), (1, 2)), # mismatched vector matrix
+ ((1, 2), (3, 1)), # mismatched matrix matrix
+ ((1,), ()), # vector scalar
+ ((), (1,)), # scalar vector
+ ((1, 1), ()), # matrix scalar
+ ((), (1, 1)), # scalar matrix
+ ((2, 2, 1), (3, 1, 2)), # cannot broadcast
+ ]
+
+ for dt, (dm1, dm2) in itertools.product(self.types, dims):
+ a = np.ones(dm1, dtype=dt)
+ b = np.ones(dm2, dtype=dt)
+ assert_raises(ValueError, self.matmul, a, b)
+
+ def test_shapes(self):
+ dims = [
+ ((1, 1), (2, 1, 1)), # broadcast first argument
+ ((2, 1, 1), (1, 1)), # broadcast second argument
+ ((2, 1, 1), (2, 1, 1)), # matrix stack sizes match
+ ]
+
+ for dt, (dm1, dm2) in itertools.product(self.types, dims):
+ a = np.ones(dm1, dtype=dt)
+ b = np.ones(dm2, dtype=dt)
+ res = self.matmul(a, b)
+ assert_(res.shape == (2, 1, 1))
+
+ # vector vector returns scalars.
+ for dt in self.types:
+ a = np.ones((2,), dtype=dt)
+ b = np.ones((2,), dtype=dt)
+ c = self.matmul(a, b)
+ assert_(np.array(c).shape == ())
+
+ def test_result_types(self):
+ mat = np.ones((1,1))
+ vec = np.ones((1,))
+ for dt in self.types:
+ m = mat.astype(dt)
+ v = vec.astype(dt)
+ for arg in [(m, v), (v, m), (m, m)]:
+ res = self.matmul(*arg)
+ assert_(res.dtype == dt)
+
+ # vector vector returns scalars
+ res = self.matmul(v, v)
+ assert_(type(res) is np.dtype(dt).type)
+
+ def test_scalar_output(self):
+ vec1 = np.array([2])
+ vec2 = np.array([3, 4]).reshape(1, -1)
+ tgt = np.array([6, 8])
+ for dt in self.types[1:]:
+ v1 = vec1.astype(dt)
+ v2 = vec2.astype(dt)
+ res = self.matmul(v1, v2)
+ assert_equal(res, tgt)
+ res = self.matmul(v2.T, v1)
+ assert_equal(res, tgt)
+
+ # boolean type
+ vec = np.array([True, True], dtype='?').reshape(1, -1)
+ res = self.matmul(vec[:, 0], vec)
+ assert_equal(res, True)
+
+ def test_vector_vector_values(self):
+ vec1 = np.array([1, 2])
+ vec2 = np.array([3, 4]).reshape(-1, 1)
+ tgt1 = np.array([11])
+ tgt2 = np.array([[3, 6], [4, 8]])
+ for dt in self.types[1:]:
+ v1 = vec1.astype(dt)
+ v2 = vec2.astype(dt)
+ res = self.matmul(v1, v2)
+ assert_equal(res, tgt1)
+ # no broadcast, we must make v1 into a 2d ndarray
+ res = self.matmul(v2, v1.reshape(1, -1))
+ assert_equal(res, tgt2)
+
+ # boolean type
+ vec = np.array([True, True], dtype='?')
+ res = self.matmul(vec, vec)
+ assert_equal(res, True)
+
+ def test_vector_matrix_values(self):
+ vec = np.array([1, 2])
+ mat1 = np.array([[1, 2], [3, 4]])
+ mat2 = np.stack([mat1]*2, axis=0)
+ tgt1 = np.array([7, 10])
+ tgt2 = np.stack([tgt1]*2, axis=0)
+ for dt in self.types[1:]:
+ v = vec.astype(dt)
+ m1 = mat1.astype(dt)
+ m2 = mat2.astype(dt)
+ res = self.matmul(v, m1)
+ assert_equal(res, tgt1)
+ res = self.matmul(v, m2)
+ assert_equal(res, tgt2)
+
+ # boolean type
+ vec = np.array([True, False])
+ mat1 = np.array([[True, False], [False, True]])
+ mat2 = np.stack([mat1]*2, axis=0)
+ tgt1 = np.array([True, False])
+ tgt2 = np.stack([tgt1]*2, axis=0)
+
+ res = self.matmul(vec, mat1)
+ assert_equal(res, tgt1)
+ res = self.matmul(vec, mat2)
+ assert_equal(res, tgt2)
+
+ def test_matrix_vector_values(self):
+ vec = np.array([1, 2])
+ mat1 = np.array([[1, 2], [3, 4]])
+ mat2 = np.stack([mat1]*2, axis=0)
+ tgt1 = np.array([5, 11])
+ tgt2 = np.stack([tgt1]*2, axis=0)
+ for dt in self.types[1:]:
+ v = vec.astype(dt)
+ m1 = mat1.astype(dt)
+ m2 = mat2.astype(dt)
+ res = self.matmul(m1, v)
+ assert_equal(res, tgt1)
+ res = self.matmul(m2, v)
+ assert_equal(res, tgt2)
+
+ # boolean type
+ vec = np.array([True, False])
+ mat1 = np.array([[True, False], [False, True]])
+ mat2 = np.stack([mat1]*2, axis=0)
+ tgt1 = np.array([True, False])
+ tgt2 = np.stack([tgt1]*2, axis=0)
+
+ res = self.matmul(vec, mat1)
+ assert_equal(res, tgt1)
+ res = self.matmul(vec, mat2)
+ assert_equal(res, tgt2)
+
+ def test_matrix_matrix_values(self):
+ mat1 = np.array([[1, 2], [3, 4]])
+ mat2 = np.array([[1, 0], [1, 1]])
+ mat12 = np.stack([mat1, mat2], axis=0)
+ mat21 = np.stack([mat2, mat1], axis=0)
+ tgt11 = np.array([[7, 10], [15, 22]])
+ tgt12 = np.array([[3, 2], [7, 4]])
+ tgt21 = np.array([[1, 2], [4, 6]])
+ tgt12_21 = np.stack([tgt12, tgt21], axis=0)
+ tgt11_12 = np.stack((tgt11, tgt12), axis=0)
+ tgt11_21 = np.stack((tgt11, tgt21), axis=0)
+ for dt in self.types[1:]:
+ m1 = mat1.astype(dt)
+ m2 = mat2.astype(dt)
+ m12 = mat12.astype(dt)
+ m21 = mat21.astype(dt)
+
+ # matrix @ matrix
+ res = self.matmul(m1, m2)
+ assert_equal(res, tgt12)
+ res = self.matmul(m2, m1)
+ assert_equal(res, tgt21)
+
+ # stacked @ matrix
+ res = self.matmul(m12, m1)
+ assert_equal(res, tgt11_21)
+
+ # matrix @ stacked
+ res = self.matmul(m1, m12)
+ assert_equal(res, tgt11_12)
+
+ # stacked @ stacked
+ res = self.matmul(m12, m21)
+ assert_equal(res, tgt12_21)
+
+ # boolean type
+ m1 = np.array([[1, 1], [0, 0]], dtype=np.bool_)
+ m2 = np.array([[1, 0], [1, 1]], dtype=np.bool_)
+ m12 = np.stack([m1, m2], axis=0)
+ m21 = np.stack([m2, m1], axis=0)
+ tgt11 = m1
+ tgt12 = m1
+ tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool_)
+ tgt12_21 = np.stack([tgt12, tgt21], axis=0)
+ tgt11_12 = np.stack((tgt11, tgt12), axis=0)
+ tgt11_21 = np.stack((tgt11, tgt21), axis=0)
+
+ # matrix @ matrix
+ res = self.matmul(m1, m2)
+ assert_equal(res, tgt12)
+ res = self.matmul(m2, m1)
+ assert_equal(res, tgt21)
+
+ # stacked @ matrix
+ res = self.matmul(m12, m1)
+ assert_equal(res, tgt11_21)
+
+ # matrix @ stacked
+ res = self.matmul(m1, m12)
+ assert_equal(res, tgt11_12)
+
+ # stacked @ stacked
+ res = self.matmul(m12, m21)
+ assert_equal(res, tgt12_21)
+
+
+class TestMatmul(MatmulCommon):
+ matmul = np.matmul
+
+ def test_out_arg(self):
+ a = np.ones((5, 2), dtype=float)
+ b = np.array([[1, 3], [5, 7]], dtype=float)
+ tgt = np.dot(a, b)
+
+ # test as positional argument
+ msg = "out positional argument"
+ out = np.zeros((5, 2), dtype=float)
+ self.matmul(a, b, out)
+ assert_array_equal(out, tgt, err_msg=msg)
+
+ # test as keyword argument
+ msg = "out keyword argument"
+ out = np.zeros((5, 2), dtype=float)
+ self.matmul(a, b, out=out)
+ assert_array_equal(out, tgt, err_msg=msg)
+
+ # test out with not allowed type cast (safe casting)
+ msg = "Cannot cast ufunc matmul output"
+ out = np.zeros((5, 2), dtype=np.int32)
+ assert_raises_regex(TypeError, msg, self.matmul, a, b, out=out)
+
+ # test out with type upcast to complex
+ out = np.zeros((5, 2), dtype=np.complex128)
+ c = self.matmul(a, b, out=out)
+ assert_(c is out)
+ with suppress_warnings() as sup:
+ sup.filter(np.ComplexWarning, '')
+ c = c.astype(tgt.dtype)
+ assert_array_equal(c, tgt)
+
+ def test_out_contiguous(self):
+ a = np.ones((5, 2), dtype=float)
+ b = np.array([[1, 3], [5, 7]], dtype=float)
+ v = np.array([1, 3], dtype=float)
+ tgt = np.dot(a, b)
+ tgt_mv = np.dot(a, v)
+
+ # test out non-contiguous
+ out = np.ones((5, 2, 2), dtype=float)
+ c = self.matmul(a, b, out=out[..., 0])
+ assert c.base is out
+ assert_array_equal(c, tgt)
+ c = self.matmul(a, v, out=out[:, 0, 0])
+ assert_array_equal(c, tgt_mv)
+ c = self.matmul(v, a.T, out=out[:, 0, 0])
+ assert_array_equal(c, tgt_mv)
+
+ # test out contiguous in only last dim
+ out = np.ones((10, 2), dtype=float)
+ c = self.matmul(a, b, out=out[::2, :])
+ assert_array_equal(c, tgt)
+
+ # test transposes of out, args
+ out = np.ones((5, 2), dtype=float)
+ c = self.matmul(b.T, a.T, out=out.T)
+ assert_array_equal(out, tgt)
+
+ m1 = np.arange(15.).reshape(5, 3)
+ m2 = np.arange(21.).reshape(3, 7)
+ m3 = np.arange(30.).reshape(5, 6)[:, ::2] # non-contiguous
+ vc = np.arange(10.)
+ vr = np.arange(6.)
+ m0 = np.zeros((3, 0))
+ @pytest.mark.parametrize('args', (
+ # matrix-matrix
+ (m1, m2), (m2.T, m1.T), (m2.T.copy(), m1.T), (m2.T, m1.T.copy()),
+ # matrix-matrix-transpose, contiguous and non
+ (m1, m1.T), (m1.T, m1), (m1, m3.T), (m3, m1.T),
+ (m3, m3.T), (m3.T, m3),
+ # matrix-matrix non-contiguous
+ (m3, m2), (m2.T, m3.T), (m2.T.copy(), m3.T),
+ # vector-matrix, matrix-vector, contiguous
+ (m1, vr[:3]), (vc[:5], m1), (m1.T, vc[:5]), (vr[:3], m1.T),
+ # vector-matrix, matrix-vector, vector non-contiguous
+ (m1, vr[::2]), (vc[::2], m1), (m1.T, vc[::2]), (vr[::2], m1.T),
+ # vector-matrix, matrix-vector, matrix non-contiguous
+ (m3, vr[:3]), (vc[:5], m3), (m3.T, vc[:5]), (vr[:3], m3.T),
+ # vector-matrix, matrix-vector, both non-contiguous
+ (m3, vr[::2]), (vc[::2], m3), (m3.T, vc[::2]), (vr[::2], m3.T),
+ # size == 0
+ (m0, m0.T), (m0.T, m0), (m1, m0), (m0.T, m1.T),
+ ))
+ def test_dot_equivalent(self, args):
+ r1 = np.matmul(*args)
+ r2 = np.dot(*args)
+ assert_equal(r1, r2)
+
+ r3 = np.matmul(args[0].copy(), args[1].copy())
+ assert_equal(r1, r3)
+
+
+
+if sys.version_info[:2] >= (3, 5):
+ class TestMatmulOperator(MatmulCommon):
+ import operator
+ matmul = operator.matmul
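+        # operator.matmul(a, b) is the functional form of the `a @ b`
+        # expression, so this reruns the shared MatmulCommon checks through
+        # the @ operator's dispatch path.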
+
+ def test_array_priority_override(self):
+
+ class A(object):
+ __array_priority__ = 1000
+
+ def __matmul__(self, other):
+ return "A"
+
+ def __rmatmul__(self, other):
+ return "A"
+
+ a = A()
+ b = np.ones(2)
+ assert_equal(self.matmul(a, b), "A")
+ assert_equal(self.matmul(b, a), "A")
+
+ def test_matmul_raises(self):
+ assert_raises(TypeError, self.matmul, np.int8(5), np.int8(5))
+ assert_raises(TypeError, self.matmul, np.void(b'abc'), np.void(b'abc'))
+ assert_raises(ValueError, self.matmul, np.arange(10), np.void(b'abc'))
+
+ def test_matmul_inplace():
+        # It would be nice to support in-place matmul eventually, but for now
+        # we don't have a working implementation, so it's better just to error
+        # out and nudge people towards writing "a = a @ b".
+ a = np.eye(3)
+ b = np.eye(3)
+ assert_raises(TypeError, a.__imatmul__, b)
+ import operator
+ assert_raises(TypeError, operator.imatmul, a, b)
+ # we avoid writing the token `exec` so as not to crash python 2's
+ # parser
+ exec_ = getattr(builtins, "exec")
+ assert_raises(TypeError, exec_, "a @= b", globals(), locals())
+
+ def test_matmul_axes():
+ a = np.arange(3*4*5).reshape(3, 4, 5)
+ c = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (1, 2)])
+ assert c.shape == (3, 4, 4)
+ d = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (0, 1)])
+ assert d.shape == (4, 4, 3)
+ e = np.swapaxes(d, 0, 2)
+ assert_array_equal(e, c)
+ f = np.matmul(a, np.arange(3), axes=[(1, 0), (0), (0)])
+ assert f.shape == (4, 5)
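+        # A sketch of what the axes argument above means (an equivalence
+        # implied by the gufunc signature, not an assertion from the original
+        # test): giving the second operand core axes (-1, -2) is the same as
+        # multiplying by its transpose.
+        assert_array_equal(c, np.matmul(a, np.swapaxes(a, -2, -1)))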
+
+
+class TestInner(object):
+
+ def test_inner_type_mismatch(self):
+ c = 1.
+ A = np.array((1,1), dtype='i,i')
+
+ assert_raises(TypeError, np.inner, c, A)
+ assert_raises(TypeError, np.inner, A, c)
+
+ def test_inner_scalar_and_vector(self):
+ for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
+ sca = np.array(3, dtype=dt)[()]
+ vec = np.array([1, 2], dtype=dt)
+ desired = np.array([3, 6], dtype=dt)
+ assert_equal(np.inner(vec, sca), desired)
+ assert_equal(np.inner(sca, vec), desired)
+
+ def test_vecself(self):
+ # Ticket 844.
+        # Inner product of a vector with itself used to segfault or give a
+        # meaningless result
+ a = np.zeros(shape=(1, 80), dtype=np.float64)
+ p = np.inner(a, a)
+ assert_almost_equal(p, 0, decimal=14)
+
+ def test_inner_product_with_various_contiguities(self):
+ # github issue 6532
+ for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
+ # check an inner product involving a matrix transpose
+ A = np.array([[1, 2], [3, 4]], dtype=dt)
+ B = np.array([[1, 3], [2, 4]], dtype=dt)
+ C = np.array([1, 1], dtype=dt)
+ desired = np.array([4, 6], dtype=dt)
+ assert_equal(np.inner(A.T, C), desired)
+ assert_equal(np.inner(C, A.T), desired)
+ assert_equal(np.inner(B, C), desired)
+ assert_equal(np.inner(C, B), desired)
+ # check a matrix product
+ desired = np.array([[7, 10], [15, 22]], dtype=dt)
+ assert_equal(np.inner(A, B), desired)
+ # check the syrk vs. gemm paths
+ desired = np.array([[5, 11], [11, 25]], dtype=dt)
+ assert_equal(np.inner(A, A), desired)
+ assert_equal(np.inner(A, A.copy()), desired)
+ # check an inner product involving an aliased and reversed view
+ a = np.arange(5).astype(dt)
+ b = a[::-1]
+ desired = np.array(10, dtype=dt).item()
+ assert_equal(np.inner(b, a), desired)
+
+ def test_3d_tensor(self):
+ for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
+ a = np.arange(24).reshape(2,3,4).astype(dt)
+ b = np.arange(24, 48).reshape(2,3,4).astype(dt)
+ desired = np.array(
+ [[[[ 158, 182, 206],
+ [ 230, 254, 278]],
+
+ [[ 566, 654, 742],
+ [ 830, 918, 1006]],
+
+ [[ 974, 1126, 1278],
+ [1430, 1582, 1734]]],
+
+ [[[1382, 1598, 1814],
+ [2030, 2246, 2462]],
+
+ [[1790, 2070, 2350],
+ [2630, 2910, 3190]],
+
+ [[2198, 2542, 2886],
+ [3230, 3574, 3918]]]],
+ dtype=dt
+ )
+ assert_equal(np.inner(a, b), desired)
+ assert_equal(np.inner(b, a).transpose(2,3,0,1), desired)
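+        # For reference: np.inner contracts the *last* axis of each operand,
+        # i.e. desired[i, j, k, l] == sum(a[i, j, :] * b[k, l, :]); e.g.
+        # desired[0, 0, 0, 0] == 0*24 + 1*25 + 2*26 + 3*27 == 158.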
+
+
+class TestAlen(object):
+ def test_basic(self):
+ m = np.array([1, 2, 3])
+ assert_equal(np.alen(m), 3)
+
+ m = np.array([[1, 2, 3], [4, 5, 7]])
+ assert_equal(np.alen(m), 2)
+
+ m = [1, 2, 3]
+ assert_equal(np.alen(m), 3)
+
+ m = [[1, 2, 3], [4, 5, 7]]
+ assert_equal(np.alen(m), 2)
+
+ def test_singleton(self):
+ assert_equal(np.alen(5), 1)
+
+
+class TestChoose(object):
+ def setup(self):
+ self.x = 2*np.ones((3,), dtype=int)
+ self.y = 3*np.ones((3,), dtype=int)
+ self.x2 = 2*np.ones((2, 3), dtype=int)
+ self.y2 = 3*np.ones((2, 3), dtype=int)
+ self.ind = [0, 0, 1]
+
+ def test_basic(self):
+ A = np.choose(self.ind, (self.x, self.y))
+ assert_equal(A, [2, 2, 3])
+
+ def test_broadcast1(self):
+ A = np.choose(self.ind, (self.x2, self.y2))
+ assert_equal(A, [[2, 2, 3], [2, 2, 3]])
+
+ def test_broadcast2(self):
+ A = np.choose(self.ind, (self.x, self.y2))
+ assert_equal(A, [[2, 2, 3], [2, 2, 3]])
+
+
+class TestRepeat(object):
+ def setup(self):
+ self.m = np.array([1, 2, 3, 4, 5, 6])
+ self.m_rect = self.m.reshape((2, 3))
+
+ def test_basic(self):
+ A = np.repeat(self.m, [1, 3, 2, 1, 1, 2])
+ assert_equal(A, [1, 2, 2, 2, 3,
+ 3, 4, 5, 6, 6])
+
+ def test_broadcast1(self):
+ A = np.repeat(self.m, 2)
+ assert_equal(A, [1, 1, 2, 2, 3, 3,
+ 4, 4, 5, 5, 6, 6])
+
+ def test_axis_spec(self):
+ A = np.repeat(self.m_rect, [2, 1], axis=0)
+ assert_equal(A, [[1, 2, 3],
+ [1, 2, 3],
+ [4, 5, 6]])
+
+ A = np.repeat(self.m_rect, [1, 3, 2], axis=1)
+ assert_equal(A, [[1, 2, 2, 2, 3, 3],
+ [4, 5, 5, 5, 6, 6]])
+
+ def test_broadcast2(self):
+ A = np.repeat(self.m_rect, 2, axis=0)
+ assert_equal(A, [[1, 2, 3],
+ [1, 2, 3],
+ [4, 5, 6],
+ [4, 5, 6]])
+
+ A = np.repeat(self.m_rect, 2, axis=1)
+ assert_equal(A, [[1, 1, 2, 2, 3, 3],
+ [4, 4, 5, 5, 6, 6]])
+
+
+# TODO: test for multidimensional
+NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4}
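+# A rough guide to the padding modes exercised below, as read off the
+# expected results in these tests: for x = [1, 2, 3] and a one-element
+# border, 'zero' pads with 0 -> [0, 1, 2, 3, 0], 'one' pads with 1,
+# 'constant' uses a caller-supplied fill value, 'circular' wraps around
+# -> [3, 1, 2, 3, 1], and 'mirror' reflects the edge values
+# -> [1, 1, 2, 3, 3].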
+
+
+@pytest.mark.parametrize('dt', [float, Decimal], ids=['float', 'object'])
+class TestNeighborhoodIter(object):
+ # Simple, 2d tests
+ def test_simple2d(self, dt):
+ # Test zero and one padding for simple data type
+ x = np.array([[0, 1], [2, 3]], dtype=dt)
+ r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt),
+ np.array([[0, 0, 0], [0, 1, 0]], dtype=dt),
+ np.array([[0, 0, 1], [0, 2, 3]], dtype=dt),
+ np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)]
+ l = _multiarray_tests.test_neighborhood_iterator(
+ x, [-1, 0, -1, 1], x[0], NEIGH_MODE['zero'])
+ assert_array_equal(l, r)
+
+ r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt),
+ np.array([[1, 1, 1], [0, 1, 1]], dtype=dt),
+ np.array([[1, 0, 1], [1, 2, 3]], dtype=dt),
+ np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)]
+ l = _multiarray_tests.test_neighborhood_iterator(
+ x, [-1, 0, -1, 1], x[0], NEIGH_MODE['one'])
+ assert_array_equal(l, r)
+
+ r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt),
+ np.array([[4, 4, 4], [0, 1, 4]], dtype=dt),
+ np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
+ np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
+ l = _multiarray_tests.test_neighborhood_iterator(
+ x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant'])
+ assert_array_equal(l, r)
+
+ def test_mirror2d(self, dt):
+ x = np.array([[0, 1], [2, 3]], dtype=dt)
+ r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt),
+ np.array([[0, 1, 1], [0, 1, 1]], dtype=dt),
+ np.array([[0, 0, 1], [2, 2, 3]], dtype=dt),
+ np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)]
+ l = _multiarray_tests.test_neighborhood_iterator(
+ x, [-1, 0, -1, 1], x[0], NEIGH_MODE['mirror'])
+ assert_array_equal(l, r)
+
+ # Simple, 1d tests
+ def test_simple(self, dt):
+ # Test padding with constant values
+ x = np.linspace(1, 5, 5).astype(dt)
+ r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]]
+ l = _multiarray_tests.test_neighborhood_iterator(
+ x, [-1, 1], x[0], NEIGH_MODE['zero'])
+ assert_array_equal(l, r)
+
+ r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]]
+ l = _multiarray_tests.test_neighborhood_iterator(
+ x, [-1, 1], x[0], NEIGH_MODE['one'])
+ assert_array_equal(l, r)
+
+ r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]]
+ l = _multiarray_tests.test_neighborhood_iterator(
+ x, [-1, 1], x[4], NEIGH_MODE['constant'])
+ assert_array_equal(l, r)
+
+ # Test mirror modes
+ def test_mirror(self, dt):
+ x = np.linspace(1, 5, 5).astype(dt)
+ r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5],
+ [2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt)
+ l = _multiarray_tests.test_neighborhood_iterator(
+ x, [-2, 2], x[1], NEIGH_MODE['mirror'])
+ assert_([i.dtype == dt for i in l])
+ assert_array_equal(l, r)
+
+ # Circular mode
+ def test_circular(self, dt):
+ x = np.linspace(1, 5, 5).astype(dt)
+ r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5],
+ [2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt)
+ l = _multiarray_tests.test_neighborhood_iterator(
+ x, [-2, 2], x[0], NEIGH_MODE['circular'])
+ assert_array_equal(l, r)
+
+
+# Test stacking neighborhood iterators
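+# Each call below builds two iterators: the first (bounds, mode) pair pads
+# x itself, and the second pair then takes neighborhoods of that padded
+# stream, so out-of-bounds reads can hit either layer's padding. E.g. in the
+# first check of test_simple_const, [1, 2, 3] is zero-extended over [-2, 4]
+# to [0, 0, 1, 2, 3, 0, 0], and the trivial [0, 0] neighborhood then visits
+# each of those seven values in turn. (This is a reading of the test
+# expectations, not documentation of the C helper itself.)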
+class TestStackedNeighborhoodIter(object):
+ # Simple, 1d test: stacking 2 constant-padded neigh iterators
+ def test_simple_const(self):
+ dt = np.float64
+ # Test zero and one padding for simple data type
+ x = np.array([1, 2, 3], dtype=dt)
+ r = [np.array([0], dtype=dt),
+ np.array([0], dtype=dt),
+ np.array([1], dtype=dt),
+ np.array([2], dtype=dt),
+ np.array([3], dtype=dt),
+ np.array([0], dtype=dt),
+ np.array([0], dtype=dt)]
+ l = _multiarray_tests.test_neighborhood_iterator_oob(
+ x, [-2, 4], NEIGH_MODE['zero'], [0, 0], NEIGH_MODE['zero'])
+ assert_array_equal(l, r)
+
+ r = [np.array([1, 0, 1], dtype=dt),
+ np.array([0, 1, 2], dtype=dt),
+ np.array([1, 2, 3], dtype=dt),
+ np.array([2, 3, 0], dtype=dt),
+ np.array([3, 0, 1], dtype=dt)]
+ l = _multiarray_tests.test_neighborhood_iterator_oob(
+ x, [-1, 3], NEIGH_MODE['zero'], [-1, 1], NEIGH_MODE['one'])
+ assert_array_equal(l, r)
+
+ # 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
+ # mirror padding
+ def test_simple_mirror(self):
+ dt = np.float64
+ # Stacking zero on top of mirror
+ x = np.array([1, 2, 3], dtype=dt)
+ r = [np.array([0, 1, 1], dtype=dt),
+ np.array([1, 1, 2], dtype=dt),
+ np.array([1, 2, 3], dtype=dt),
+ np.array([2, 3, 3], dtype=dt),
+ np.array([3, 3, 0], dtype=dt)]
+ l = _multiarray_tests.test_neighborhood_iterator_oob(
+ x, [-1, 3], NEIGH_MODE['mirror'], [-1, 1], NEIGH_MODE['zero'])
+ assert_array_equal(l, r)
+
+ # Stacking mirror on top of zero
+ x = np.array([1, 2, 3], dtype=dt)
+ r = [np.array([1, 0, 0], dtype=dt),
+ np.array([0, 0, 1], dtype=dt),
+ np.array([0, 1, 2], dtype=dt),
+ np.array([1, 2, 3], dtype=dt),
+ np.array([2, 3, 0], dtype=dt)]
+ l = _multiarray_tests.test_neighborhood_iterator_oob(
+ x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['mirror'])
+ assert_array_equal(l, r)
+
+ # Stacking mirror on top of zero: 2nd
+ x = np.array([1, 2, 3], dtype=dt)
+ r = [np.array([0, 1, 2], dtype=dt),
+ np.array([1, 2, 3], dtype=dt),
+ np.array([2, 3, 0], dtype=dt),
+ np.array([3, 0, 0], dtype=dt),
+ np.array([0, 0, 3], dtype=dt)]
+ l = _multiarray_tests.test_neighborhood_iterator_oob(
+ x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['mirror'])
+ assert_array_equal(l, r)
+
+ # Stacking mirror on top of zero: 3rd
+ x = np.array([1, 2, 3], dtype=dt)
+ r = [np.array([1, 0, 0, 1, 2], dtype=dt),
+ np.array([0, 0, 1, 2, 3], dtype=dt),
+ np.array([0, 1, 2, 3, 0], dtype=dt),
+ np.array([1, 2, 3, 0, 0], dtype=dt),
+ np.array([2, 3, 0, 0, 3], dtype=dt)]
+ l = _multiarray_tests.test_neighborhood_iterator_oob(
+ x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['mirror'])
+ assert_array_equal(l, r)
+
+ # 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
+ # circular padding
+ def test_simple_circular(self):
+ dt = np.float64
+        # Stacking zero on top of circular
+ x = np.array([1, 2, 3], dtype=dt)
+ r = [np.array([0, 3, 1], dtype=dt),
+ np.array([3, 1, 2], dtype=dt),
+ np.array([1, 2, 3], dtype=dt),
+ np.array([2, 3, 1], dtype=dt),
+ np.array([3, 1, 0], dtype=dt)]
+ l = _multiarray_tests.test_neighborhood_iterator_oob(
+ x, [-1, 3], NEIGH_MODE['circular'], [-1, 1], NEIGH_MODE['zero'])
+ assert_array_equal(l, r)
+
+        # Stacking circular on top of zero
+ x = np.array([1, 2, 3], dtype=dt)
+ r = [np.array([3, 0, 0], dtype=dt),
+ np.array([0, 0, 1], dtype=dt),
+ np.array([0, 1, 2], dtype=dt),
+ np.array([1, 2, 3], dtype=dt),
+ np.array([2, 3, 0], dtype=dt)]
+ l = _multiarray_tests.test_neighborhood_iterator_oob(
+ x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['circular'])
+ assert_array_equal(l, r)
+
+        # Stacking circular on top of zero: 2nd
+ x = np.array([1, 2, 3], dtype=dt)
+ r = [np.array([0, 1, 2], dtype=dt),
+ np.array([1, 2, 3], dtype=dt),
+ np.array([2, 3, 0], dtype=dt),
+ np.array([3, 0, 0], dtype=dt),
+ np.array([0, 0, 1], dtype=dt)]
+ l = _multiarray_tests.test_neighborhood_iterator_oob(
+ x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['circular'])
+ assert_array_equal(l, r)
+
+        # Stacking circular on top of zero: 3rd
+ x = np.array([1, 2, 3], dtype=dt)
+ r = [np.array([3, 0, 0, 1, 2], dtype=dt),
+ np.array([0, 0, 1, 2, 3], dtype=dt),
+ np.array([0, 1, 2, 3, 0], dtype=dt),
+ np.array([1, 2, 3, 0, 0], dtype=dt),
+ np.array([2, 3, 0, 0, 1], dtype=dt)]
+ l = _multiarray_tests.test_neighborhood_iterator_oob(
+ x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['circular'])
+ assert_array_equal(l, r)
+
+ # 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator
+ # being strictly within the array
+ def test_simple_strict_within(self):
+ dt = np.float64
+ # Stacking zero on top of zero, first neighborhood strictly inside the
+ # array
+ x = np.array([1, 2, 3], dtype=dt)
+ r = [np.array([1, 2, 3, 0], dtype=dt)]
+ l = _multiarray_tests.test_neighborhood_iterator_oob(
+ x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['zero'])
+ assert_array_equal(l, r)
+
+ # Stacking mirror on top of zero, first neighborhood strictly inside the
+ # array
+ x = np.array([1, 2, 3], dtype=dt)
+ r = [np.array([1, 2, 3, 3], dtype=dt)]
+ l = _multiarray_tests.test_neighborhood_iterator_oob(
+ x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['mirror'])
+ assert_array_equal(l, r)
+
+        # Stacking circular on top of zero, first neighborhood strictly inside the
+ # array
+ x = np.array([1, 2, 3], dtype=dt)
+ r = [np.array([1, 2, 3, 1], dtype=dt)]
+ l = _multiarray_tests.test_neighborhood_iterator_oob(
+ x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['circular'])
+ assert_array_equal(l, r)
+
+class TestWarnings(object):
+
+ def test_complex_warning(self):
+ x = np.array([1, 2])
+ y = np.array([1-2j, 1+2j])
+
+ with warnings.catch_warnings():
+ warnings.simplefilter("error", np.ComplexWarning)
+ assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y)
+ assert_equal(x, [1, 2])
+
+
+class TestMinScalarType(object):
+
+ def test_usigned_shortshort(self):
+ dt = np.min_scalar_type(2**8-1)
+ wanted = np.dtype('uint8')
+ assert_equal(wanted, dt)
+
+ def test_usigned_short(self):
+ dt = np.min_scalar_type(2**16-1)
+ wanted = np.dtype('uint16')
+ assert_equal(wanted, dt)
+
+ def test_usigned_int(self):
+ dt = np.min_scalar_type(2**32-1)
+ wanted = np.dtype('uint32')
+ assert_equal(wanted, dt)
+
+ def test_usigned_longlong(self):
+ dt = np.min_scalar_type(2**63-1)
+ wanted = np.dtype('uint64')
+ assert_equal(wanted, dt)
+
+ def test_object(self):
+ dt = np.min_scalar_type(2**64)
+ wanted = np.dtype('O')
+ assert_equal(wanted, dt)
+
+
+from numpy.core._internal import _dtype_from_pep3118
+
+
+class TestPEP3118Dtype(object):
+ def _check(self, spec, wanted):
+ dt = np.dtype(wanted)
+ actual = _dtype_from_pep3118(spec)
+ assert_equal(actual, dt,
+ err_msg="spec %r != dtype %r" % (spec, wanted))
+
+ def test_native_padding(self):
+ align = np.dtype('i').alignment
+ for j in range(8):
+ if j == 0:
+ s = 'bi'
+ else:
+ s = 'b%dxi' % j
+ self._check('@'+s, {'f0': ('i1', 0),
+ 'f1': ('i', align*(1 + j//align))})
+ self._check('='+s, {'f0': ('i1', 0),
+ 'f1': ('i', 1+j)})
+
+ def test_native_padding_2(self):
+ # Native padding should work also for structs and sub-arrays
+ self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)})
+ self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)})
+
+ def test_trailing_padding(self):
+ # Trailing padding should be included, *and*, the item size
+ # should match the alignment if in aligned mode
+ align = np.dtype('i').alignment
+ size = np.dtype('i').itemsize
+
+ def aligned(n):
+ return align*(1 + (n-1)//align)
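+        # i.e. round n up to the next multiple of `align`; with align == 4,
+        # aligned(5) == 8 and aligned(8) == 8.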
+
+ base = dict(formats=['i'], names=['f0'])
+
+ self._check('ix', dict(itemsize=aligned(size + 1), **base))
+ self._check('ixx', dict(itemsize=aligned(size + 2), **base))
+ self._check('ixxx', dict(itemsize=aligned(size + 3), **base))
+ self._check('ixxxx', dict(itemsize=aligned(size + 4), **base))
+ self._check('i7x', dict(itemsize=aligned(size + 7), **base))
+
+ self._check('^ix', dict(itemsize=size + 1, **base))
+ self._check('^ixx', dict(itemsize=size + 2, **base))
+ self._check('^ixxx', dict(itemsize=size + 3, **base))
+ self._check('^ixxxx', dict(itemsize=size + 4, **base))
+ self._check('^i7x', dict(itemsize=size + 7, **base))
+
+ def test_native_padding_3(self):
+ dt = np.dtype(
+ [('a', 'b'), ('b', 'i'),
+ ('sub', np.dtype('b,i')), ('c', 'i')],
+ align=True)
+ self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt)
+
+ dt = np.dtype(
+ [('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
+ ('e', 'b'), ('sub', np.dtype('b,i', align=True))])
+ self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt)
+
+ def test_padding_with_array_inside_struct(self):
+ dt = np.dtype(
+ [('a', 'b'), ('b', 'i'), ('c', 'b', (3,)),
+ ('d', 'i')],
+ align=True)
+ self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt)
+
+ def test_byteorder_inside_struct(self):
+ # The byte order after @T{=i} should be '=', not '@'.
+ # Check this by noting the absence of native alignment.
+ self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0),
+ 'f1': ('i', 5)})
+
+ def test_intra_padding(self):
+ # Natively aligned sub-arrays may require some internal padding
+ align = np.dtype('i').alignment
+ size = np.dtype('i').itemsize
+
+ def aligned(n):
+ return (align*(1 + (n-1)//align))
+
+ self._check('(3)T{ix}', (dict(
+ names=['f0'],
+ formats=['i'],
+ offsets=[0],
+ itemsize=aligned(size + 1)
+ ), (3,)))
+
+ def test_char_vs_string(self):
+ dt = np.dtype('c')
+ self._check('c', dt)
+
+ dt = np.dtype([('f0', 'S1', (4,)), ('f1', 'S4')])
+ self._check('4c4s', dt)
+
+ def test_field_order(self):
+ # gh-9053 - previously, we relied on dictionary key order
+ self._check("(0)I:a:f:b:", [('a', 'I', (0,)), ('b', 'f')])
+ self._check("(0)I:b:f:a:", [('b', 'I', (0,)), ('a', 'f')])
+
+ def test_unnamed_fields(self):
+ self._check('ii', [('f0', 'i'), ('f1', 'i')])
+ self._check('ii:f0:', [('f1', 'i'), ('f0', 'i')])
+
+ self._check('i', 'i')
+ self._check('i:f0:', [('f0', 'i')])
+
+
+class TestNewBufferProtocol(object):
+ """ Test PEP3118 buffers """
+
+ def _check_roundtrip(self, obj):
+ obj = np.asarray(obj)
+ x = memoryview(obj)
+ y = np.asarray(x)
+ y2 = np.array(x)
+ assert_(not y.flags.owndata)
+ assert_(y2.flags.owndata)
+
+ assert_equal(y.dtype, obj.dtype)
+ assert_equal(y.shape, obj.shape)
+ assert_array_equal(obj, y)
+
+ assert_equal(y2.dtype, obj.dtype)
+ assert_equal(y2.shape, obj.shape)
+ assert_array_equal(obj, y2)
+
+ def test_roundtrip(self):
+ x = np.array([1, 2, 3, 4, 5], dtype='i4')
+ self._check_roundtrip(x)
+
+ x = np.array([[1, 2], [3, 4]], dtype=np.float64)
+ self._check_roundtrip(x)
+
+ x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
+ self._check_roundtrip(x)
+
+ dt = [('a', 'b'),
+ ('b', 'h'),
+ ('c', 'i'),
+ ('d', 'l'),
+ ('dx', 'q'),
+ ('e', 'B'),
+ ('f', 'H'),
+ ('g', 'I'),
+ ('h', 'L'),
+ ('hx', 'Q'),
+ ('i', np.single),
+ ('j', np.double),
+ ('k', np.longdouble),
+ ('ix', np.csingle),
+ ('jx', np.cdouble),
+ ('kx', np.clongdouble),
+ ('l', 'S4'),
+ ('m', 'U4'),
+ ('n', 'V3'),
+ ('o', '?'),
+ ('p', np.half),
+ ]
+ x = np.array(
+ [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ b'aaaa', 'bbbb', b'xxx', True, 1.0)],
+ dtype=dt)
+ self._check_roundtrip(x)
+
+ x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))])
+ self._check_roundtrip(x)
+
+ x = np.array([1, 2, 3], dtype='>i2')
+ self._check_roundtrip(x)
+
+        x = np.array([1, 2, 3], dtype='<i2')
+        self._check_roundtrip(x)
+
+        # zero-filled array with an explicitly non-native byte order
+        dt = np.dtype('i4').newbyteorder('>')
+        x = np.zeros(4, dtype=dt)
+        self._check_roundtrip(x)
+
+ def test_roundtrip_scalar(self):
+ # Issue #4015.
+ self._check_roundtrip(0)
+
+ def test_invalid_buffer_format(self):
+ # datetime64 cannot be used fully in a buffer yet
+ # Should be fixed in the next Numpy major release
+ dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')])
+ a = np.empty(3, dt)
+ assert_raises((ValueError, BufferError), memoryview, a)
+ assert_raises((ValueError, BufferError), memoryview, np.array((3), 'M8[D]'))
+
+ def test_export_simple_1d(self):
+ x = np.array([1, 2, 3, 4, 5], dtype='i')
+ y = memoryview(x)
+ assert_equal(y.format, 'i')
+ assert_equal(y.shape, (5,))
+ assert_equal(y.ndim, 1)
+ assert_equal(y.strides, (4,))
+ assert_equal(y.suboffsets, EMPTY)
+ assert_equal(y.itemsize, 4)
+
+ def test_export_simple_nd(self):
+ x = np.array([[1, 2], [3, 4]], dtype=np.float64)
+ y = memoryview(x)
+ assert_equal(y.format, 'd')
+ assert_equal(y.shape, (2, 2))
+ assert_equal(y.ndim, 2)
+ assert_equal(y.strides, (16, 8))
+ assert_equal(y.suboffsets, EMPTY)
+ assert_equal(y.itemsize, 8)
+
+ def test_export_discontiguous(self):
+ x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
+ y = memoryview(x)
+ assert_equal(y.format, 'f')
+ assert_equal(y.shape, (3, 3))
+ assert_equal(y.ndim, 2)
+ assert_equal(y.strides, (36, 4))
+ assert_equal(y.suboffsets, EMPTY)
+ assert_equal(y.itemsize, 4)
+
+ def test_export_record(self):
+ dt = [('a', 'b'),
+ ('b', 'h'),
+ ('c', 'i'),
+ ('d', 'l'),
+ ('dx', 'q'),
+ ('e', 'B'),
+ ('f', 'H'),
+ ('g', 'I'),
+ ('h', 'L'),
+ ('hx', 'Q'),
+ ('i', np.single),
+ ('j', np.double),
+ ('k', np.longdouble),
+ ('ix', np.csingle),
+ ('jx', np.cdouble),
+ ('kx', np.clongdouble),
+ ('l', 'S4'),
+ ('m', 'U4'),
+ ('n', 'V3'),
+ ('o', '?'),
+ ('p', np.half),
+ ]
+ x = np.array(
+ [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ b'aaaa', 'bbbb', b' ', True, 1.0)],
+ dtype=dt)
+ y = memoryview(x)
+ assert_equal(y.shape, (1,))
+ assert_equal(y.ndim, 1)
+ assert_equal(y.suboffsets, EMPTY)
+
+ sz = sum([np.dtype(b).itemsize for a, b in dt])
+ if np.dtype('l').itemsize == 4:
+ assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
+ else:
+ assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
+ # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides
+ if not (np.ones(1).strides[0] == np.iinfo(np.intp).max):
+ assert_equal(y.strides, (sz,))
+ assert_equal(y.itemsize, sz)
+
+ def test_export_subarray(self):
+ x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))])
+ y = memoryview(x)
+ assert_equal(y.format, 'T{(2,2)i:a:}')
+ assert_equal(y.shape, EMPTY)
+ assert_equal(y.ndim, 0)
+ assert_equal(y.strides, EMPTY)
+ assert_equal(y.suboffsets, EMPTY)
+ assert_equal(y.itemsize, 16)
+
+ def test_export_endian(self):
+ x = np.array([1, 2, 3], dtype='>i')
+ y = memoryview(x)
+ if sys.byteorder == 'little':
+ assert_equal(y.format, '>i')
+ else:
+ assert_equal(y.format, 'i')
+
+        x = np.array([1, 2, 3], dtype='<i')
+        y = memoryview(x)
+        if sys.byteorder == 'little':
+            assert_equal(y.format, 'i')
+        else:
+            assert_equal(y.format, '<i')
+
+    def test_error_message_unsupported(self):
+        # wchar has no corresponding numpy type, so it yields a memoryview
+        # format that numpy cannot represent
+        t = ctypes.c_wchar * 4
+        with assert_raises(ValueError) as cm:
+            np.array(t())
+
+        exc = cm.exception
+        if sys.version_info.major > 2:
+            with assert_raises_regex(
+                    NotImplementedError,
+                    r"Unrepresentable .* 'u' \(UCS-2 strings\)"
+            ):
+                raise exc.__cause__
+
+ def test_ctypes_integer_via_memoryview(self):
+ # gh-11150, due to bpo-10746
+ for c_integer in {ctypes.c_int, ctypes.c_long, ctypes.c_longlong}:
+ value = c_integer(42)
+ with warnings.catch_warnings(record=True):
+ warnings.filterwarnings('always', r'.*\bctypes\b', RuntimeWarning)
+ np.asarray(value)
+
+ def test_ctypes_struct_via_memoryview(self):
+ # gh-10528
+ class foo(ctypes.Structure):
+ _fields_ = [('a', ctypes.c_uint8), ('b', ctypes.c_uint32)]
+ f = foo(a=1, b=2)
+
+ with warnings.catch_warnings(record=True):
+ warnings.filterwarnings('always', r'.*\bctypes\b', RuntimeWarning)
+ arr = np.asarray(f)
+
+ assert_equal(arr['a'], 1)
+ assert_equal(arr['b'], 2)
+ f.a = 3
+ assert_equal(arr['a'], 3)
+
+
+class TestArrayAttributeDeletion(object):
+
+ def test_multiarray_writable_attributes_deletion(self):
+        # ticket #2046: should not segfault; should raise AttributeError
+ a = np.ones(2)
+ attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat']
+ with suppress_warnings() as sup:
+ sup.filter(DeprecationWarning, "Assigning the 'data' attribute")
+ for s in attr:
+ assert_raises(AttributeError, delattr, a, s)
+
+ def test_multiarray_not_writable_attributes_deletion(self):
+ a = np.ones(2)
+ attr = ["ndim", "flags", "itemsize", "size", "nbytes", "base",
+ "ctypes", "T", "__array_interface__", "__array_struct__",
+ "__array_priority__", "__array_finalize__"]
+ for s in attr:
+ assert_raises(AttributeError, delattr, a, s)
+
+ def test_multiarray_flags_writable_attribute_deletion(self):
+ a = np.ones(2).flags
+ attr = ['writebackifcopy', 'updateifcopy', 'aligned', 'writeable']
+ for s in attr:
+ assert_raises(AttributeError, delattr, a, s)
+
+ def test_multiarray_flags_not_writable_attribute_deletion(self):
+ a = np.ones(2).flags
+ attr = ["contiguous", "c_contiguous", "f_contiguous", "fortran",
+ "owndata", "fnc", "forc", "behaved", "carray", "farray",
+ "num"]
+ for s in attr:
+ assert_raises(AttributeError, delattr, a, s)
+
+
+class TestArrayInterface():
+ class Foo(object):
+ def __init__(self, value):
+ self.value = value
+ self.iface = {'typestr': 'f8'}
+
+ def __float__(self):
+ return float(self.value)
+
+ @property
+ def __array_interface__(self):
+ return self.iface
+
+
+ f = Foo(0.5)
+
+ @pytest.mark.parametrize('val, iface, expected', [
+ (f, {}, 0.5),
+ ([f], {}, [0.5]),
+ ([f, f], {}, [0.5, 0.5]),
+ (f, {'shape': ()}, 0.5),
+ (f, {'shape': None}, TypeError),
+ (f, {'shape': (1, 1)}, [[0.5]]),
+ (f, {'shape': (2,)}, ValueError),
+ (f, {'strides': ()}, 0.5),
+ (f, {'strides': (2,)}, ValueError),
+ (f, {'strides': 16}, TypeError),
+ ])
+ def test_scalar_interface(self, val, iface, expected):
+ # Test scalar coercion within the array interface
+ self.f.iface = {'typestr': 'f8'}
+ self.f.iface.update(iface)
+ if HAS_REFCOUNT:
+ pre_cnt = sys.getrefcount(np.dtype('f8'))
+ if isinstance(expected, type):
+ assert_raises(expected, np.array, val)
+ else:
+ result = np.array(val)
+ assert_equal(np.array(val), expected)
+ assert result.dtype == 'f8'
+ del result
+ if HAS_REFCOUNT:
+ post_cnt = sys.getrefcount(np.dtype('f8'))
+ assert_equal(pre_cnt, post_cnt)
+
+def test_interface_no_shape():
+ class ArrayLike(object):
+ array = np.array(1)
+ __array_interface__ = array.__array_interface__
+ assert_equal(np.array(ArrayLike()), 1)
+
+
+def test_array_interface_itemsize():
+ # See gh-6361
+ my_dtype = np.dtype({'names': ['A', 'B'], 'formats': ['f4', 'f4'],
+ 'offsets': [0, 8], 'itemsize': 16})
+ a = np.ones(10, dtype=my_dtype)
+ descr_t = np.dtype(a.__array_interface__['descr'])
+ typestr_t = np.dtype(a.__array_interface__['typestr'])
+ assert_equal(descr_t.itemsize, typestr_t.itemsize)
+
+
+def test_array_interface_empty_shape():
+ # See gh-7994
+ arr = np.array([1, 2, 3])
+ interface1 = dict(arr.__array_interface__)
+ interface1['shape'] = ()
+
+ class DummyArray1(object):
+ __array_interface__ = interface1
+
+    # NOTE: Because Py2 str/Py3 bytes support the buffer interface, setting
+    # the interface data to bytes would invoke the bug this test checks for:
+    # that __array_interface__ with shape=() is not allowed if the data is an
+    # object exposing the buffer interface
+ interface2 = dict(interface1)
+ interface2['data'] = arr[0].tobytes()
+
+ class DummyArray2(object):
+ __array_interface__ = interface2
+
+ arr1 = np.asarray(DummyArray1())
+ arr2 = np.asarray(DummyArray2())
+ arr3 = arr[:1].reshape(())
+ assert_equal(arr1, arr2)
+ assert_equal(arr1, arr3)
+
+
+def test_flat_element_deletion():
+ it = np.ones(3).flat
+ try:
+ del it[1]
+ del it[1:2]
+ except TypeError:
+ pass
+ except Exception:
+ raise AssertionError
+
+
+def test_scalar_element_deletion():
+ a = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')])
+ assert_raises(ValueError, a[0].__delitem__, 'x')
+
+
+class TestMemEventHook(object):
+ def test_mem_seteventhook(self):
+ # The actual tests are within the C code in
+ # multiarray/_multiarray_tests.c.src
+ _multiarray_tests.test_pydatamem_seteventhook_start()
+ # force an allocation and free of a numpy array
+        # needs to be larger than the limit of the small-memory cache in ctors.c
+ a = np.zeros(1000)
+ del a
+ gc.collect()
+ _multiarray_tests.test_pydatamem_seteventhook_end()
+
+class TestMapIter(object):
+ def test_mapiter(self):
+ # The actual tests are within the C code in
+ # multiarray/_multiarray_tests.c.src
+
+ a = np.arange(12).reshape((3, 4)).astype(float)
+ index = ([1, 1, 2, 0],
+ [0, 0, 2, 3])
+ vals = [50, 50, 30, 16]
+
+ _multiarray_tests.test_inplace_increment(a, index, vals)
+ assert_equal(a, [[0.00, 1., 2.0, 19.],
+ [104., 5., 6.0, 7.0],
+ [8.00, 9., 40., 11.]])
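+
+        # For these repeated indices the C helper appears to behave like the
+        # public np.add.at, which also accumulates duplicate entries, e.g.:
+        #   c = np.arange(12).reshape((3, 4)).astype(float)
+        #   np.add.at(c, index, vals)   # c should then equal `a` above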
+
+ b = np.arange(6).astype(float)
+ index = (np.array([1, 2, 0]),)
+ vals = [50, 4, 100.1]
+ _multiarray_tests.test_inplace_increment(b, index, vals)
+ assert_equal(b, [100.1, 51., 6., 3., 4., 5.])
+
+
+class TestAsCArray(object):
+ def test_1darray(self):
+ array = np.arange(24, dtype=np.double)
+ from_c = _multiarray_tests.test_as_c_array(array, 3)
+ assert_equal(array[3], from_c)
+
+ def test_2darray(self):
+ array = np.arange(24, dtype=np.double).reshape(3, 8)
+ from_c = _multiarray_tests.test_as_c_array(array, 2, 4)
+ assert_equal(array[2, 4], from_c)
+
+ def test_3darray(self):
+ array = np.arange(24, dtype=np.double).reshape(2, 3, 4)
+ from_c = _multiarray_tests.test_as_c_array(array, 1, 2, 3)
+ assert_equal(array[1, 2, 3], from_c)
+
+
+class TestConversion(object):
+ def test_array_scalar_relational_operation(self):
+ # All integer
+ for dt1 in np.typecodes['AllInteger']:
+ assert_(1 > np.array(0, dtype=dt1), "type %s failed" % (dt1,))
+ assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,))
+
+ for dt2 in np.typecodes['AllInteger']:
+ assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2),
+ "type %s and %s failed" % (dt1, dt2))
+ assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2),
+ "type %s and %s failed" % (dt1, dt2))
+
+ # Unsigned integers
+ for dt1 in 'BHILQP':
+ assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,))
+ assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % (dt1,))
+ assert_(-1 != np.array(1, dtype=dt1), "type %s failed" % (dt1,))
+
+ # Unsigned vs signed
+ for dt2 in 'bhilqp':
+ assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
+ "type %s and %s failed" % (dt1, dt2))
+ assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
+ "type %s and %s failed" % (dt1, dt2))
+ assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2),
+ "type %s and %s failed" % (dt1, dt2))
+
+ # Signed integers and floats
+ for dt1 in 'bhlqp' + np.typecodes['Float']:
+ assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
+ assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
+ assert_(-1 == np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
+
+ for dt2 in 'bhlqp' + np.typecodes['Float']:
+ assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
+ "type %s and %s failed" % (dt1, dt2))
+ assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
+ "type %s and %s failed" % (dt1, dt2))
+ assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2),
+ "type %s and %s failed" % (dt1, dt2))
+
+ def test_to_bool_scalar(self):
+ assert_equal(bool(np.array([False])), False)
+ assert_equal(bool(np.array([True])), True)
+ assert_equal(bool(np.array([[42]])), True)
+ assert_raises(ValueError, bool, np.array([1, 2]))
+
+ class NotConvertible(object):
+ def __bool__(self):
+ raise NotImplementedError
+ __nonzero__ = __bool__ # python 2
+
+ assert_raises(NotImplementedError, bool, np.array(NotConvertible()))
+ assert_raises(NotImplementedError, bool, np.array([NotConvertible()]))
+
+ self_containing = np.array([None])
+ self_containing[0] = self_containing
+ try:
+ Error = RecursionError
+ except NameError:
+ Error = RuntimeError # python < 3.5
+ assert_raises(Error, bool, self_containing) # previously stack overflow
+ self_containing[0] = None # resolve circular reference
+
+ def test_to_int_scalar(self):
+ # gh-9972 means that these aren't always the same
+ int_funcs = (int, lambda x: x.__int__())
+ for int_func in int_funcs:
+ assert_equal(int_func(np.array([1])), 1)
+ assert_equal(int_func(np.array([0])), 0)
+ assert_equal(int_func(np.array([[42]])), 42)
+ assert_raises(TypeError, int_func, np.array([1, 2]))
+
+ # gh-9972
+ assert_equal(4, int_func(np.array('4')))
+ assert_equal(5, int_func(np.bytes_(b'5')))
+ assert_equal(6, int_func(np.unicode_(u'6')))
+
+ class HasTrunc:
+ def __trunc__(self):
+ return 3
+ assert_equal(3, int_func(np.array(HasTrunc())))
+ assert_equal(3, int_func(np.array([HasTrunc()])))
+
+ class NotConvertible(object):
+ def __int__(self):
+ raise NotImplementedError
+ assert_raises(NotImplementedError,
+ int_func, np.array(NotConvertible()))
+ assert_raises(NotImplementedError,
+ int_func, np.array([NotConvertible()]))
+
+
+class TestWhere(object):
+ def test_basic(self):
+ dts = [bool, np.int16, np.int32, np.int64, np.double, np.complex128,
+ np.longdouble, np.clongdouble]
+ for dt in dts:
+ c = np.ones(53, dtype=bool)
+ assert_equal(np.where( c, dt(0), dt(1)), dt(0))
+ assert_equal(np.where(~c, dt(0), dt(1)), dt(1))
+ assert_equal(np.where(True, dt(0), dt(1)), dt(0))
+ assert_equal(np.where(False, dt(0), dt(1)), dt(1))
+ d = np.ones_like(c).astype(dt)
+ e = np.zeros_like(d)
+ r = d.astype(dt)
+ c[7] = False
+ r[7] = e[7]
+ assert_equal(np.where(c, e, e), e)
+ assert_equal(np.where(c, d, e), r)
+ assert_equal(np.where(c, d, e[0]), r)
+ assert_equal(np.where(c, d[0], e), r)
+ assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2])
+ assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2])
+ assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3])
+ assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3])
+ assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2])
+ assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3])
+ assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3])
+
+ def test_exotic(self):
+ # object
+ assert_array_equal(np.where(True, None, None), np.array(None))
+ # zero sized
+ m = np.array([], dtype=bool).reshape(0, 3)
+ b = np.array([], dtype=np.float64).reshape(0, 3)
+ assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3))
+
+ # object cast
+ d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313,
+ 0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013,
+ 1.267, 0.229, -1.39, 0.487])
+ nan = float('NaN')
+ e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan,
+ 'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'],
+ dtype=object)
+ m = np.array([0, 0, 1, 0, 1, 1, 0, 0, 1, 1,
+ 0, 1, 1, 0, 1, 1, 0, 1, 0, 0], dtype=bool)
+
+ r = e[:]
+ r[np.where(m)] = d[np.where(m)]
+ assert_array_equal(np.where(m, d, e), r)
+
+ r = e[:]
+ r[np.where(~m)] = d[np.where(~m)]
+ assert_array_equal(np.where(m, e, d), r)
+
+ assert_array_equal(np.where(m, e, e), e)
+
+        # minimal dtype result with NaN scalar (e.g. required by pandas)
+ d = np.array([1., 2.], dtype=np.float32)
+ e = float('NaN')
+ assert_equal(np.where(True, d, e).dtype, np.float32)
+ e = float('Infinity')
+ assert_equal(np.where(True, d, e).dtype, np.float32)
+ e = float('-Infinity')
+ assert_equal(np.where(True, d, e).dtype, np.float32)
+ # also check upcast
+ e = float(1e150)
+ assert_equal(np.where(True, d, e).dtype, np.float64)
+
+ def test_ndim(self):
+ c = [True, False]
+ a = np.zeros((2, 25))
+ b = np.ones((2, 25))
+ r = np.where(np.array(c)[:,np.newaxis], a, b)
+ assert_array_equal(r[0], a[0])
+ assert_array_equal(r[1], b[0])
+
+ a = a.T
+ b = b.T
+ r = np.where(c, a, b)
+ assert_array_equal(r[:,0], a[:,0])
+ assert_array_equal(r[:,1], b[:,0])
+
+ def test_dtype_mix(self):
+ c = np.array([False, True, False, False, False, False, True, False,
+ False, False, True, False])
+ a = np.uint32(1)
+ b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
+ dtype=np.float64)
+ r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
+ dtype=np.float64)
+ assert_equal(np.where(c, a, b), r)
+
+ a = a.astype(np.float32)
+ b = b.astype(np.int64)
+ assert_equal(np.where(c, a, b), r)
+
+ # non bool mask
+ c = c.astype(int)
+ c[c != 0] = 34242324
+ assert_equal(np.where(c, a, b), r)
+ # invert
+ tmpmask = c != 0
+ c[c == 0] = 41247212
+ c[tmpmask] = 0
+ assert_equal(np.where(c, b, a), r)
+
+ def test_foreign(self):
+ c = np.array([False, True, False, False, False, False, True, False,
+ False, False, True, False])
+ r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
+ dtype=np.float64)
+ a = np.ones(1, dtype='>i4')
+ b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
+ dtype=np.float64)
+ assert_equal(np.where(c, a, b), r)
+
+ b = b.astype('>f8')
+ assert_equal(np.where(c, a, b), r)
+
+ a = a.astype('i4')
+ assert_equal(np.where(c, a, b), r)
+
+ def test_error(self):
+ c = [True, True]
+ a = np.ones((4, 5))
+ b = np.ones((5, 5))
+ assert_raises(ValueError, np.where, c, a, a)
+ assert_raises(ValueError, np.where, c[0], a, b)
+
+ def test_string(self):
+ # gh-4778 check strings are properly filled with nulls
+ a = np.array("abc")
+ b = np.array("x" * 753)
+ assert_equal(np.where(True, a, b), "abc")
+ assert_equal(np.where(False, b, a), "abc")
+
+ # check native datatype sized strings
+ a = np.array("abcd")
+ b = np.array("x" * 8)
+ assert_equal(np.where(True, a, b), "abcd")
+ assert_equal(np.where(False, b, a), "abcd")
+
+ def test_empty_result(self):
+ # pass empty where result through an assignment which reads the data of
+ # empty arrays, error detectable with valgrind, see gh-8922
+ x = np.zeros((1, 1))
+ ibad = np.vstack(np.where(x == 99.))
+ assert_array_equal(ibad,
+ np.atleast_2d(np.array([[],[]], dtype=np.intp)))
+
+ def test_largedim(self):
+ # invalid read regression gh-9304
+ shape = [10, 2, 3, 4, 5, 6]
+ np.random.seed(2)
+ array = np.random.rand(*shape)
+
+ for i in range(10):
+ benchmark = array.nonzero()
+ result = array.nonzero()
+ assert_array_equal(benchmark, result)
+
+
+if not IS_PYPY:
+ # sys.getsizeof() is not valid on PyPy
+ class TestSizeOf(object):
+
+ def test_empty_array(self):
+ x = np.array([])
+ assert_(sys.getsizeof(x) > 0)
+
+ def check_array(self, dtype):
+ elem_size = dtype(0).itemsize
+
+ for length in [10, 50, 100, 500]:
+ x = np.arange(length, dtype=dtype)
+ assert_(sys.getsizeof(x) > length * elem_size)
+
+ def test_array_int32(self):
+ self.check_array(np.int32)
+
+ def test_array_int64(self):
+ self.check_array(np.int64)
+
+ def test_array_float32(self):
+ self.check_array(np.float32)
+
+ def test_array_float64(self):
+ self.check_array(np.float64)
+
+ def test_view(self):
+ d = np.ones(100)
+ assert_(sys.getsizeof(d[...]) < sys.getsizeof(d))
+
+ def test_reshape(self):
+ d = np.ones(100)
+ assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy()))
+
+ def test_resize(self):
+ d = np.ones(100)
+ old = sys.getsizeof(d)
+ d.resize(50)
+ assert_(old > sys.getsizeof(d))
+ d.resize(150)
+ assert_(old < sys.getsizeof(d))
+
+ def test_error(self):
+ d = np.ones(100)
+ assert_raises(TypeError, d.__sizeof__, "a")
+
+
+class TestHashing(object):
+
+ def test_arrays_not_hashable(self):
+ x = np.ones(3)
+ assert_raises(TypeError, hash, x)
+
+ def test_collections_hashable(self):
+ x = np.array([])
+ assert_(not isinstance(x, collections_abc.Hashable))
+
+
+class TestArrayPriority(object):
+ # This will go away when __array_priority__ is settled, meanwhile
+ # it serves to check unintended changes.
+ op = operator
+ binary_ops = [
+ op.pow, op.add, op.sub, op.mul, op.floordiv, op.truediv, op.mod,
+ op.and_, op.or_, op.xor, op.lshift, op.rshift, op.mod, op.gt,
+ op.ge, op.lt, op.le, op.ne, op.eq
+ ]
+
+    # See #7949. Don't use the "/" operator with the -3 switch, since python
+    # reports it as a DeprecationWarning
+ if sys.version_info[0] < 3 and not sys.py3kwarning:
+ binary_ops.append(op.div)
+
+ class Foo(np.ndarray):
+ __array_priority__ = 100.
+
+ def __new__(cls, *args, **kwargs):
+ return np.array(*args, **kwargs).view(cls)
+
+ class Bar(np.ndarray):
+ __array_priority__ = 101.
+
+ def __new__(cls, *args, **kwargs):
+ return np.array(*args, **kwargs).view(cls)
+
+ class Other(object):
+ __array_priority__ = 1000.
+
+ def _all(self, other):
+ return self.__class__()
+
+ __add__ = __radd__ = _all
+ __sub__ = __rsub__ = _all
+ __mul__ = __rmul__ = _all
+ __pow__ = __rpow__ = _all
+ __div__ = __rdiv__ = _all
+ __mod__ = __rmod__ = _all
+ __truediv__ = __rtruediv__ = _all
+ __floordiv__ = __rfloordiv__ = _all
+ __and__ = __rand__ = _all
+ __xor__ = __rxor__ = _all
+ __or__ = __ror__ = _all
+ __lshift__ = __rlshift__ = _all
+ __rshift__ = __rrshift__ = _all
+ __eq__ = _all
+ __ne__ = _all
+ __gt__ = _all
+ __ge__ = _all
+ __lt__ = _all
+ __le__ = _all
+
+ def test_ndarray_subclass(self):
+ a = np.array([1, 2])
+ b = self.Bar([1, 2])
+ for f in self.binary_ops:
+ msg = repr(f)
+ assert_(isinstance(f(a, b), self.Bar), msg)
+ assert_(isinstance(f(b, a), self.Bar), msg)
+
+ def test_ndarray_other(self):
+ a = np.array([1, 2])
+ b = self.Other()
+ for f in self.binary_ops:
+ msg = repr(f)
+ assert_(isinstance(f(a, b), self.Other), msg)
+ assert_(isinstance(f(b, a), self.Other), msg)
+
+ def test_subclass_subclass(self):
+ a = self.Foo([1, 2])
+ b = self.Bar([1, 2])
+ for f in self.binary_ops:
+ msg = repr(f)
+ assert_(isinstance(f(a, b), self.Bar), msg)
+ assert_(isinstance(f(b, a), self.Bar), msg)
+
+ def test_subclass_other(self):
+ a = self.Foo([1, 2])
+ b = self.Other()
+ for f in self.binary_ops:
+ msg = repr(f)
+ assert_(isinstance(f(a, b), self.Other), msg)
+ assert_(isinstance(f(b, a), self.Other), msg)
+
+
+class TestBytestringArrayNonzero(object):
+
+ def test_empty_bstring_array_is_falsey(self):
+ assert_(not np.array([''], dtype=str))
+
+ def test_whitespace_bstring_array_is_falsey(self):
+ a = np.array(['spam'], dtype=str)
+ a[0] = ' \0\0'
+ assert_(not a)
+
+ def test_all_null_bstring_array_is_falsey(self):
+ a = np.array(['spam'], dtype=str)
+ a[0] = '\0\0\0\0'
+ assert_(not a)
+
+ def test_null_inside_bstring_array_is_truthy(self):
+ a = np.array(['spam'], dtype=str)
+ a[0] = ' \0 \0'
+ assert_(a)
+
+
+class TestUnicodeArrayNonzero(object):
+
+ def test_empty_ustring_array_is_falsey(self):
+ assert_(not np.array([''], dtype=np.unicode))
+
+ def test_whitespace_ustring_array_is_falsey(self):
+ a = np.array(['eggs'], dtype=np.unicode)
+ a[0] = ' \0\0'
+ assert_(not a)
+
+ def test_all_null_ustring_array_is_falsey(self):
+ a = np.array(['eggs'], dtype=np.unicode)
+ a[0] = '\0\0\0\0'
+ assert_(not a)
+
+ def test_null_inside_ustring_array_is_truthy(self):
+ a = np.array(['eggs'], dtype=np.unicode)
+ a[0] = ' \0 \0'
+ assert_(a)
+
+
+class TestFormat(object):
+
+ def test_0d(self):
+ a = np.array(np.pi)
+ assert_equal('{:0.3g}'.format(a), '3.14')
+ assert_equal('{:0.3g}'.format(a[()]), '3.14')
+
+ def test_1d_no_format(self):
+ a = np.array([np.pi])
+ assert_equal('{}'.format(a), str(a))
+
+ def test_1d_format(self):
+ # until gh-5543, ensure that the behaviour matches what it used to be
+ a = np.array([np.pi])
+ if sys.version_info[:2] >= (3, 4):
+ assert_raises(TypeError, '{:30}'.format, a)
+ else:
+ with suppress_warnings() as sup:
+ sup.filter(PendingDeprecationWarning)
+ res = '{:30}'.format(a)
+ dst = object.__format__(a, '30')
+ assert_equal(res, dst)
+
+class TestCTypes(object):
+
+ def test_ctypes_is_available(self):
+ test_arr = np.array([[1, 2, 3], [4, 5, 6]])
+
+ assert_equal(ctypes, test_arr.ctypes._ctypes)
+ assert_equal(tuple(test_arr.ctypes.shape), (2, 3))
+
+ def test_ctypes_is_not_available(self):
+ from numpy.core import _internal
+ _internal.ctypes = None
+ try:
+ test_arr = np.array([[1, 2, 3], [4, 5, 6]])
+
+ assert_(isinstance(test_arr.ctypes._ctypes,
+ _internal._missing_ctypes))
+ assert_equal(tuple(test_arr.ctypes.shape), (2, 3))
+ finally:
+ _internal.ctypes = ctypes
+
+ def _make_readonly(x):
+ x.flags.writeable = False
+ return x
+
+ @pytest.mark.parametrize('arr', [
+ np.array([1, 2, 3]),
+ np.array([['one', 'two'], ['three', 'four']]),
+ np.array((1, 2), dtype='i4,i4'),
+        np.zeros((2,), dtype=
+            np.dtype(dict(
+                formats=['<i4', '<i4'],
+                names=['a', 'b'],
+                offsets=[0, 2],
+                itemsize=6
+            ))
+        ),
+        np.array([None], dtype=object),
+        _make_readonly(np.array([1, 2, 3])),
+    ])
+    def test_ctypes_data_as_holds_reference(self, arr):
+        # gh-9647: the pointer returned by ctypes.data_as must keep the
+        # underlying array alive
+        arr = arr.copy()
+        arr_ref = weakref.ref(arr)
+
+        ctypes_ptr = arr.ctypes.data_as(ctypes.c_void_p)
+
+        # `ctypes_ptr` should hold onto `arr`
+        del arr
+        gc.collect()
+        assert_(arr_ref() is not None,
+                "ctypes pointer did not hold onto a reference")
+
+        # but when the `ctypes_ptr` object dies, so should `arr`
+        del ctypes_ptr
+        gc.collect()
+        assert_(arr_ref() is None,
+                "ctypes pointer did not release its reference")
+
+
+class TestWritebackIfCopy(object):
+    # all these tests use the WRITEBACKIFCOPY mechanism
+    def test_insert_noncontiguous(self):
+        a = np.arange(6).reshape(2,3).T # force non-c-contiguous
+        # uses arr_insert
+        np.place(a, a>2, [44, 55])
+        assert_equal(a, np.array([[0, 44], [1, 55], [2, 44]]))
+        # hit one of the failing paths
+        assert_raises(ValueError, np.place, a, a>20, [])
+
+ def test_put_noncontiguous(self):
+ a = np.arange(6).reshape(2,3).T # force non-c-contiguous
+ np.put(a, [0, 2], [44, 55])
+ assert_equal(a, np.array([[44, 3], [55, 4], [2, 5]]))
+
+ def test_putmask_noncontiguous(self):
+ a = np.arange(6).reshape(2,3).T # force non-c-contiguous
+ # uses arr_putmask
+ np.putmask(a, a>2, a**2)
+ assert_equal(a, np.array([[0, 9], [1, 16], [2, 25]]))
+
+ def test_take_mode_raise(self):
+ a = np.arange(6, dtype='int')
+ out = np.empty(2, dtype='int')
+ np.take(a, [0, 2], out=out, mode='raise')
+ assert_equal(out, np.array([0, 2]))
+
+ def test_choose_mod_raise(self):
+ a = np.array([[1, 0, 1], [0, 1, 0], [1, 0, 1]])
+ out = np.empty((3,3), dtype='int')
+ choices = [-10, 10]
+ np.choose(a, choices, out=out, mode='raise')
+ assert_equal(out, np.array([[ 10, -10, 10],
+ [-10, 10, -10],
+ [ 10, -10, 10]]))
+
+ def test_flatiter__array__(self):
+ a = np.arange(9).reshape(3,3)
+ b = a.T.flat
+ c = b.__array__()
+ # triggers the WRITEBACKIFCOPY resolution, assuming refcount semantics
+ del c
+
+ def test_dot_out(self):
+ # if HAVE_CBLAS, will use WRITEBACKIFCOPY
+ a = np.arange(9, dtype=float).reshape(3,3)
+ b = np.dot(a, a, out=a)
+ assert_equal(b, np.array([[15, 18, 21], [42, 54, 66], [69, 90, 111]]))
+
+ def test_view_assign(self):
+ from numpy.core._multiarray_tests import npy_create_writebackifcopy, npy_resolve
+
+ arr = np.arange(9).reshape(3, 3).T
+ arr_wb = npy_create_writebackifcopy(arr)
+ assert_(arr_wb.flags.writebackifcopy)
+ assert_(arr_wb.base is arr)
+ arr_wb[...] = -100
+ npy_resolve(arr_wb)
+ # arr changes after resolve, even though we assigned to arr_wb
+ assert_equal(arr, -100)
+ # after resolve, the two arrays no longer reference each other
+ assert_(arr_wb.ctypes.data != 0)
+ assert_equal(arr_wb.base, None)
+ # assigning to arr_wb does not get transferred to arr
+ arr_wb[...] = 100
+ assert_equal(arr, -100)
+
+ def test_dealloc_warning(self):
+ with suppress_warnings() as sup:
+ sup.record(RuntimeWarning)
+ arr = np.arange(9).reshape(3, 3)
+ v = arr.T
+ _multiarray_tests.npy_abuse_writebackifcopy(v)
+ assert len(sup.log) == 1
+
+ def test_view_discard_refcount(self):
+ from numpy.core._multiarray_tests import npy_create_writebackifcopy, npy_discard
+
+ arr = np.arange(9).reshape(3, 3).T
+ orig = arr.copy()
+ if HAS_REFCOUNT:
+ arr_cnt = sys.getrefcount(arr)
+ arr_wb = npy_create_writebackifcopy(arr)
+ assert_(arr_wb.flags.writebackifcopy)
+ assert_(arr_wb.base is arr)
+ arr_wb[...] = -100
+ npy_discard(arr_wb)
+ # arr remains unchanged after discard
+ assert_equal(arr, orig)
+ # after discard, the two arrays no longer reference each other
+ assert_(arr_wb.ctypes.data != 0)
+ assert_equal(arr_wb.base, None)
+ if HAS_REFCOUNT:
+ assert_equal(arr_cnt, sys.getrefcount(arr))
+ # assigning to arr_wb does not get transferred to arr
+ arr_wb[...] = 100
+ assert_equal(arr, orig)
+
+
+class TestArange(object):
+ def test_infinite(self):
+ assert_raises_regex(
+ ValueError, "size exceeded",
+ np.arange, 0, np.inf
+ )
+
+ def test_nan_step(self):
+ assert_raises_regex(
+ ValueError, "cannot compute length",
+ np.arange, 0, 1, np.nan
+ )
+
+ def test_zero_step(self):
+ assert_raises(ZeroDivisionError, np.arange, 0, 10, 0)
+ assert_raises(ZeroDivisionError, np.arange, 0.0, 10.0, 0.0)
+
+ # empty range
+ assert_raises(ZeroDivisionError, np.arange, 0, 0, 0)
+ assert_raises(ZeroDivisionError, np.arange, 0.0, 0.0, 0.0)
+
+
+class TestArrayFinalize(object):
+ """ Tests __array_finalize__ """
+
+ def test_receives_base(self):
+ # gh-11237
+ class SavesBase(np.ndarray):
+ def __array_finalize__(self, obj):
+ self.saved_base = self.base
+
+ a = np.array(1).view(SavesBase)
+ assert_(a.saved_base is a.base)
+
+ def test_lifetime_on_error(self):
+ # gh-11237
+ class RaisesInFinalize(np.ndarray):
+ def __array_finalize__(self, obj):
+ # crash, but keep this object alive
+ raise Exception(self)
+
+ # a plain object can't be weakref'd
+ class Dummy(object): pass
+
+ # get a weak reference to an object within an array
+ obj_arr = np.array(Dummy())
+ obj_ref = weakref.ref(obj_arr[()])
+
+ # get an array that crashed in __array_finalize__
+ with assert_raises(Exception) as e:
+ obj_arr.view(RaisesInFinalize)
+ if sys.version_info.major == 2:
+ # prevent an extra reference being kept
+ sys.exc_clear()
+
+ obj_subarray = e.exception.args[0]
+ del e
+ assert_(isinstance(obj_subarray, RaisesInFinalize))
+
+ # reference should still be held by obj_arr
+ gc.collect()
+ assert_(obj_ref() is not None, "object should not already be dead")
+
+ del obj_arr
+ gc.collect()
+ assert_(obj_ref() is not None, "obj_arr should not hold the last reference")
+
+ del obj_subarray
+ gc.collect()
+ assert_(obj_ref() is None, "no references should remain")
+
+
+def test_orderconverter_with_nonASCII_unicode_ordering():
+ # gh-7475
+ a = np.arange(5)
+ assert_raises(ValueError, a.flatten, order=u'\xe2')
+
+
+def test_equal_override():
+ # gh-9153: ndarray.__eq__ uses special logic for structured arrays, which
+ # did not respect overrides with __array_priority__ or __array_ufunc__.
+ # The PR fixed this for __array_priority__ and __array_ufunc__ = None.
+ class MyAlwaysEqual(object):
+ def __eq__(self, other):
+ return "eq"
+
+ def __ne__(self, other):
+ return "ne"
+
+ class MyAlwaysEqualOld(MyAlwaysEqual):
+ __array_priority__ = 10000
+
+ class MyAlwaysEqualNew(MyAlwaysEqual):
+ __array_ufunc__ = None
+
+ array = np.array([(0, 1), (2, 3)], dtype='i4,i4')
+ for my_always_equal_cls in MyAlwaysEqualOld, MyAlwaysEqualNew:
+ my_always_equal = my_always_equal_cls()
+ assert_equal(my_always_equal == array, 'eq')
+ assert_equal(array == my_always_equal, 'eq')
+ assert_equal(my_always_equal != array, 'ne')
+ assert_equal(array != my_always_equal, 'ne')
+
+
+def test_npymath_complex():
+ # Smoketest npymath functions
+ from numpy.core._multiarray_tests import (
+ npy_cabs, npy_carg)
+
+ funcs = {npy_cabs: np.absolute,
+ npy_carg: np.angle}
+ vals = (1, np.inf, -np.inf, np.nan)
+ types = (np.complex64, np.complex128, np.clongdouble)
+
+ for fun, npfun in funcs.items():
+ for x, y in itertools.product(vals, vals):
+ for t in types:
+ z = t(complex(x, y))
+ got = fun(z)
+ expected = npfun(z)
+ assert_allclose(got, expected)
+
+
+def test_npymath_real():
+ # Smoketest npymath functions
+ from numpy.core._multiarray_tests import (
+ npy_log10, npy_cosh, npy_sinh, npy_tan, npy_tanh)
+
+ funcs = {npy_log10: np.log10,
+ npy_cosh: np.cosh,
+ npy_sinh: np.sinh,
+ npy_tan: np.tan,
+ npy_tanh: np.tanh}
+ vals = (1, np.inf, -np.inf, np.nan)
+ types = (np.float32, np.float64, np.longdouble)
+
+ with np.errstate(all='ignore'):
+ for fun, npfun in funcs.items():
+ for x, t in itertools.product(vals, types):
+ z = t(x)
+ got = fun(z)
+ expected = npfun(z)
+ assert_allclose(got, expected)
+
+def test_uintalignment_and_alignment():
+    # alignment code needs to satisfy these requirements:
+    # 1. numpy structs match C struct layout
+    # 2. ufuncs/casting are safe with respect to aligned access
+    # 3. copy code is safe with respect to "uint aligned" access
+ #
+ # Complex types are the main problem, whose alignment may not be the same
+ # as their "uint alignment".
+ #
+    # This test might only fail on certain platforms, where uint64 alignment
+    # is not equal to complex64 alignment. The last two checks below will
+    # only fail when numpy is compiled with DEBUG=1.
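+    # Concretely (an assumption about typical x86-64 builds, not something
+    # asserted here): complex64 itself only needs 4-byte alignment, but copy
+    # loops may move it as a uint64, which wants 8 -- hence the separate
+    # "uint alignment" notion.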
+
+ d1 = np.dtype('u1,c8', align=True)
+ d2 = np.dtype('u4,c8', align=True)
+ d3 = np.dtype({'names': ['a', 'b'], 'formats': ['u1', d1]}, align=True)
+
+ assert_equal(np.zeros(1, dtype=d1)['f1'].flags['ALIGNED'], True)
+ assert_equal(np.zeros(1, dtype=d2)['f1'].flags['ALIGNED'], True)
+ assert_equal(np.zeros(1, dtype='u1,c8')['f1'].flags['ALIGNED'], False)
+
+ # check that C struct matches numpy struct size
+ s = _multiarray_tests.get_struct_alignments()
+ for d, (alignment, size) in zip([d1,d2,d3], s):
+ assert_equal(d.alignment, alignment)
+ assert_equal(d.itemsize, size)
+
+ # check that ufuncs don't complain in debug mode
+ # (this is probably OK if the aligned flag is true above)
+ src = np.zeros((2,2), dtype=d1)['f1'] # 4-byte aligned, often
+ np.exp(src) # assert fails?
+
+ # check that copy code doesn't complain in debug mode
+ dst = np.zeros((2,2), dtype='c8')
+ dst[:,1] = src[:,1] # assert in lowlevel_strided_loops fails?
+
+class TestAlignment(object):
+ # adapted from scipy._lib.tests.test__util.test__aligned_zeros
+ # Checks that unusual memory alignments don't trip up numpy.
+    # In particular, check that RELAXED_STRIDES doesn't trip alignment
+    # assertions in NDEBUG mode for size-0 arrays (gh-12503)
+
+ def check(self, shape, dtype, order, align):
+ err_msg = repr((shape, dtype, order, align))
+ x = _aligned_zeros(shape, dtype, order, align=align)
+ if align is None:
+ align = np.dtype(dtype).alignment
+ assert_equal(x.__array_interface__['data'][0] % align, 0)
+ if hasattr(shape, '__len__'):
+ assert_equal(x.shape, shape, err_msg)
+ else:
+ assert_equal(x.shape, (shape,), err_msg)
+ assert_equal(x.dtype, dtype)
+ if order == "C":
+ assert_(x.flags.c_contiguous, err_msg)
+ elif order == "F":
+ if x.size > 0:
+ assert_(x.flags.f_contiguous, err_msg)
+ elif order is None:
+ assert_(x.flags.c_contiguous, err_msg)
+ else:
+ raise ValueError()
+
+ def test_various_alignments(self):
+ for align in [1, 2, 3, 4, 8, 12, 16, 32, 64, None]:
+ for n in [0, 1, 3, 11]:
+ for order in ["C", "F", None]:
+ for dtype in list(np.typecodes["All"]) + ['i4,i4,i4']:
+ if dtype == 'O':
+ # object dtype can't be misaligned
+ continue
+ for shape in [n, (1, 2, 3, n)]:
+ self.check(shape, np.dtype(dtype), order, align)
+
+ def test_strided_loop_alignments(self):
+        # particularly test that complex64 and float128 use the right
+        # alignment code-paths, since these are especially problematic. It is
+        # useful to turn on USE_DEBUG for this test, so lowlevel-loop asserts
+        # are run.
+ for align in [1, 2, 4, 8, 12, 16, None]:
+ xf64 = _aligned_zeros(3, np.float64)
+
+ xc64 = _aligned_zeros(3, np.complex64, align=align)
+ xf128 = _aligned_zeros(3, np.longdouble, align=align)
+
+ # test casting, both to and from misaligned
+ with suppress_warnings() as sup:
+ sup.filter(np.ComplexWarning, "Casting complex values")
+ xc64.astype('f8')
+ xf64.astype(np.complex64)
+ test = xc64 + xf64
+
+ xf128.astype('f8')
+ xf64.astype(np.longdouble)
+ test = xf128 + xf64
+
+ test = xf128 + xc64
+
+ # test copy, both to and from misaligned
+ # contig copy
+ xf64[:] = xf64.copy()
+ xc64[:] = xc64.copy()
+ xf128[:] = xf128.copy()
+ # strided copy
+ xf64[::2] = xf64[::2].copy()
+ xc64[::2] = xc64[::2].copy()
+ xf128[::2] = xf128[::2].copy()
+
+def test_getfield():
+ a = np.arange(32, dtype='uint16')
+ if sys.byteorder == 'little':
+ i = 0
+ j = 1
+ else:
+ i = 1
+ j = 0
+ b = a.getfield('int8', i)
+ assert_equal(b, a)
+ b = a.getfield('int8', j)
+ assert_equal(b, 0)
+ pytest.raises(ValueError, a.getfield, 'uint8', -1)
+ pytest.raises(ValueError, a.getfield, 'uint8', 16)
+ pytest.raises(ValueError, a.getfield, 'uint64', 0)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_multiarray.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_multiarray.pyc
new file mode 100644
index 0000000..bcf8596
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_multiarray.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_nditer.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_nditer.py
new file mode 100644
index 0000000..26fd9c3
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_nditer.py
@@ -0,0 +1,2861 @@
+from __future__ import division, absolute_import, print_function
+
+import sys
+import pytest
+
+import numpy as np
+import numpy.core._multiarray_tests as _multiarray_tests
+from numpy import array, arange, nditer, all
+from numpy.testing import (
+ assert_, assert_equal, assert_array_equal, assert_raises,
+ HAS_REFCOUNT, suppress_warnings
+ )
+
+
+def iter_multi_index(i):
+ ret = []
+ while not i.finished:
+ ret.append(i.multi_index)
+ i.iternext()
+ return ret
+
+def iter_indices(i):
+ ret = []
+ while not i.finished:
+ ret.append(i.index)
+ i.iternext()
+ return ret
+
+def iter_iterindices(i):
+ ret = []
+ while not i.finished:
+ ret.append(i.iterindex)
+ i.iternext()
+ return ret
+
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+def test_iter_refcount():
+ # Make sure the iterator doesn't leak
+
+ # Basic
+ a = arange(6)
+ dt = np.dtype('f4').newbyteorder()
+ rc_a = sys.getrefcount(a)
+ rc_dt = sys.getrefcount(dt)
+ with nditer(a, [],
+ [['readwrite', 'updateifcopy']],
+ casting='unsafe',
+ op_dtypes=[dt]) as it:
+ assert_(not it.iterationneedsapi)
+ assert_(sys.getrefcount(a) > rc_a)
+ assert_(sys.getrefcount(dt) > rc_dt)
+ # del 'it'
+ it = None
+ assert_equal(sys.getrefcount(a), rc_a)
+ assert_equal(sys.getrefcount(dt), rc_dt)
+
+ # With a copy
+ a = arange(6, dtype='f4')
+ dt = np.dtype('f4')
+ rc_a = sys.getrefcount(a)
+ rc_dt = sys.getrefcount(dt)
+ it = nditer(a, [],
+ [['readwrite']],
+ op_dtypes=[dt])
+ rc2_a = sys.getrefcount(a)
+ rc2_dt = sys.getrefcount(dt)
+ it2 = it.copy()
+ assert_(sys.getrefcount(a) > rc2_a)
+ assert_(sys.getrefcount(dt) > rc2_dt)
+ it = None
+ assert_equal(sys.getrefcount(a), rc2_a)
+ assert_equal(sys.getrefcount(dt), rc2_dt)
+ it2 = None
+ assert_equal(sys.getrefcount(a), rc_a)
+ assert_equal(sys.getrefcount(dt), rc_dt)
+
+ del it2 # avoid pyflakes unused variable warning
+
+def test_iter_best_order():
+ # The iterator should always find the iteration order
+ # with increasing memory addresses
+
+ # Test the ordering for 1-D to 5-D shapes
+ for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]:
+ a = arange(np.prod(shape))
+ # Test each combination of positive and negative strides
+ for dirs in range(2**len(shape)):
+ dirs_index = [slice(None)]*len(shape)
+ for bit in range(len(shape)):
+ if ((2**bit) & dirs):
+ dirs_index[bit] = slice(None, None, -1)
+ dirs_index = tuple(dirs_index)
+
+ aview = a.reshape(shape)[dirs_index]
+ # C-order
+ i = nditer(aview, [], [['readonly']])
+ assert_equal([x for x in i], a)
+ # Fortran-order
+ i = nditer(aview.T, [], [['readonly']])
+ assert_equal([x for x in i], a)
+ # Other order
+ if len(shape) > 2:
+ i = nditer(aview.swapaxes(0, 1), [], [['readonly']])
+ assert_equal([x for x in i], a)
+
+def test_iter_c_order():
+ # Test forcing C order
+
+ # Test the ordering for 1-D to 5-D shapes
+ for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]:
+ a = arange(np.prod(shape))
+ # Test each combination of positive and negative strides
+ for dirs in range(2**len(shape)):
+ dirs_index = [slice(None)]*len(shape)
+ for bit in range(len(shape)):
+ if ((2**bit) & dirs):
+ dirs_index[bit] = slice(None, None, -1)
+ dirs_index = tuple(dirs_index)
+
+ aview = a.reshape(shape)[dirs_index]
+ # C-order
+ i = nditer(aview, order='C')
+ assert_equal([x for x in i], aview.ravel(order='C'))
+ # Fortran-order
+ i = nditer(aview.T, order='C')
+ assert_equal([x for x in i], aview.T.ravel(order='C'))
+ # Other order
+ if len(shape) > 2:
+ i = nditer(aview.swapaxes(0, 1), order='C')
+ assert_equal([x for x in i],
+ aview.swapaxes(0, 1).ravel(order='C'))
+
+def test_iter_f_order():
+ # Test forcing F order
+
+ # Test the ordering for 1-D to 5-D shapes
+ for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]:
+ a = arange(np.prod(shape))
+ # Test each combination of positive and negative strides
+ for dirs in range(2**len(shape)):
+ dirs_index = [slice(None)]*len(shape)
+ for bit in range(len(shape)):
+ if ((2**bit) & dirs):
+ dirs_index[bit] = slice(None, None, -1)
+ dirs_index = tuple(dirs_index)
+
+ aview = a.reshape(shape)[dirs_index]
+ # C-order
+ i = nditer(aview, order='F')
+ assert_equal([x for x in i], aview.ravel(order='F'))
+ # Fortran-order
+ i = nditer(aview.T, order='F')
+ assert_equal([x for x in i], aview.T.ravel(order='F'))
+ # Other order
+ if len(shape) > 2:
+ i = nditer(aview.swapaxes(0, 1), order='F')
+ assert_equal([x for x in i],
+ aview.swapaxes(0, 1).ravel(order='F'))
+
+def test_iter_c_or_f_order():
+ # Test forcing any contiguous (C or F) order
+
+ # Test the ordering for 1-D to 5-D shapes
+ for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]:
+ a = arange(np.prod(shape))
+ # Test each combination of positive and negative strides
+ for dirs in range(2**len(shape)):
+ dirs_index = [slice(None)]*len(shape)
+ for bit in range(len(shape)):
+ if ((2**bit) & dirs):
+ dirs_index[bit] = slice(None, None, -1)
+ dirs_index = tuple(dirs_index)
+
+ aview = a.reshape(shape)[dirs_index]
+ # C-order
+ i = nditer(aview, order='A')
+ assert_equal([x for x in i], aview.ravel(order='A'))
+ # Fortran-order
+ i = nditer(aview.T, order='A')
+ assert_equal([x for x in i], aview.T.ravel(order='A'))
+ # Other order
+ if len(shape) > 2:
+ i = nditer(aview.swapaxes(0, 1), order='A')
+ assert_equal([x for x in i],
+ aview.swapaxes(0, 1).ravel(order='A'))
+
+def test_iter_best_order_multi_index_1d():
+ # The multi-indices should be correct with any reordering
+
+ a = arange(4)
+ # 1D order
+ i = nditer(a, ['multi_index'], [['readonly']])
+ assert_equal(iter_multi_index(i), [(0,), (1,), (2,), (3,)])
+ # 1D reversed order
+ i = nditer(a[::-1], ['multi_index'], [['readonly']])
+ assert_equal(iter_multi_index(i), [(3,), (2,), (1,), (0,)])
+
+def test_iter_best_order_multi_index_2d():
+ # The multi-indices should be correct with any reordering
+
+ a = arange(6)
+ # 2D C-order
+ i = nditer(a.reshape(2, 3), ['multi_index'], [['readonly']])
+ assert_equal(iter_multi_index(i), [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)])
+ # 2D Fortran-order
+ i = nditer(a.reshape(2, 3).copy(order='F'), ['multi_index'], [['readonly']])
+ assert_equal(iter_multi_index(i), [(0, 0), (1, 0), (0, 1), (1, 1), (0, 2), (1, 2)])
+ # 2D reversed C-order
+ i = nditer(a.reshape(2, 3)[::-1], ['multi_index'], [['readonly']])
+ assert_equal(iter_multi_index(i), [(1, 0), (1, 1), (1, 2), (0, 0), (0, 1), (0, 2)])
+ i = nditer(a.reshape(2, 3)[:, ::-1], ['multi_index'], [['readonly']])
+ assert_equal(iter_multi_index(i), [(0, 2), (0, 1), (0, 0), (1, 2), (1, 1), (1, 0)])
+ i = nditer(a.reshape(2, 3)[::-1, ::-1], ['multi_index'], [['readonly']])
+ assert_equal(iter_multi_index(i), [(1, 2), (1, 1), (1, 0), (0, 2), (0, 1), (0, 0)])
+ # 2D reversed Fortran-order
+ i = nditer(a.reshape(2, 3).copy(order='F')[::-1], ['multi_index'], [['readonly']])
+ assert_equal(iter_multi_index(i), [(1, 0), (0, 0), (1, 1), (0, 1), (1, 2), (0, 2)])
+ i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1],
+ ['multi_index'], [['readonly']])
+ assert_equal(iter_multi_index(i), [(0, 2), (1, 2), (0, 1), (1, 1), (0, 0), (1, 0)])
+ i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1],
+ ['multi_index'], [['readonly']])
+ assert_equal(iter_multi_index(i), [(1, 2), (0, 2), (1, 1), (0, 1), (1, 0), (0, 0)])
+
+def test_iter_best_order_multi_index_3d():
+ # The multi-indices should be correct with any reordering
+
+ a = arange(12)
+ # 3D C-order
+ i = nditer(a.reshape(2, 3, 2), ['multi_index'], [['readonly']])
+ assert_equal(iter_multi_index(i),
+ [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1),
+ (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1)])
+ # 3D Fortran-order
+ i = nditer(a.reshape(2, 3, 2).copy(order='F'), ['multi_index'], [['readonly']])
+ assert_equal(iter_multi_index(i),
+ [(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0),
+ (0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1)])
+ # 3D reversed C-order
+ i = nditer(a.reshape(2, 3, 2)[::-1], ['multi_index'], [['readonly']])
+ assert_equal(iter_multi_index(i),
+ [(1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1),
+ (0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1)])
+ i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['multi_index'], [['readonly']])
+ assert_equal(iter_multi_index(i),
+ [(0, 2, 0), (0, 2, 1), (0, 1, 0), (0, 1, 1), (0, 0, 0), (0, 0, 1),
+ (1, 2, 0), (1, 2, 1), (1, 1, 0), (1, 1, 1), (1, 0, 0), (1, 0, 1)])
+ i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['multi_index'], [['readonly']])
+ assert_equal(iter_multi_index(i),
+ [(0, 0, 1), (0, 0, 0), (0, 1, 1), (0, 1, 0), (0, 2, 1), (0, 2, 0),
+ (1, 0, 1), (1, 0, 0), (1, 1, 1), (1, 1, 0), (1, 2, 1), (1, 2, 0)])
+ # 3D reversed Fortran-order
+ i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1],
+ ['multi_index'], [['readonly']])
+ assert_equal(iter_multi_index(i),
+ [(1, 0, 0), (0, 0, 0), (1, 1, 0), (0, 1, 0), (1, 2, 0), (0, 2, 0),
+ (1, 0, 1), (0, 0, 1), (1, 1, 1), (0, 1, 1), (1, 2, 1), (0, 2, 1)])
+ i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1],
+ ['multi_index'], [['readonly']])
+ assert_equal(iter_multi_index(i),
+ [(0, 2, 0), (1, 2, 0), (0, 1, 0), (1, 1, 0), (0, 0, 0), (1, 0, 0),
+ (0, 2, 1), (1, 2, 1), (0, 1, 1), (1, 1, 1), (0, 0, 1), (1, 0, 1)])
+ i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1],
+ ['multi_index'], [['readonly']])
+ assert_equal(iter_multi_index(i),
+ [(0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1),
+ (0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0)])
+
+def test_iter_best_order_c_index_1d():
+ # The C index should be correct with any reordering
+
+ a = arange(4)
+ # 1D order
+ i = nditer(a, ['c_index'], [['readonly']])
+ assert_equal(iter_indices(i), [0, 1, 2, 3])
+ # 1D reversed order
+ i = nditer(a[::-1], ['c_index'], [['readonly']])
+ assert_equal(iter_indices(i), [3, 2, 1, 0])
+
+def test_iter_best_order_c_index_2d():
+ # The C index should be correct with any reordering
+
+ a = arange(6)
+ # 2D C-order
+ i = nditer(a.reshape(2, 3), ['c_index'], [['readonly']])
+ assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5])
+ # 2D Fortran-order
+ i = nditer(a.reshape(2, 3).copy(order='F'),
+ ['c_index'], [['readonly']])
+ assert_equal(iter_indices(i), [0, 3, 1, 4, 2, 5])
+ # 2D reversed C-order
+ i = nditer(a.reshape(2, 3)[::-1], ['c_index'], [['readonly']])
+ assert_equal(iter_indices(i), [3, 4, 5, 0, 1, 2])
+ i = nditer(a.reshape(2, 3)[:, ::-1], ['c_index'], [['readonly']])
+ assert_equal(iter_indices(i), [2, 1, 0, 5, 4, 3])
+ i = nditer(a.reshape(2, 3)[::-1, ::-1], ['c_index'], [['readonly']])
+ assert_equal(iter_indices(i), [5, 4, 3, 2, 1, 0])
+ # 2D reversed Fortran-order
+ i = nditer(a.reshape(2, 3).copy(order='F')[::-1],
+ ['c_index'], [['readonly']])
+ assert_equal(iter_indices(i), [3, 0, 4, 1, 5, 2])
+ i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1],
+ ['c_index'], [['readonly']])
+ assert_equal(iter_indices(i), [2, 5, 1, 4, 0, 3])
+ i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1],
+ ['c_index'], [['readonly']])
+ assert_equal(iter_indices(i), [5, 2, 4, 1, 3, 0])
+
+def test_iter_best_order_c_index_3d():
+ # The C index should be correct with any reordering
+
+ a = arange(12)
+ # 3D C-order
+ i = nditer(a.reshape(2, 3, 2), ['c_index'], [['readonly']])
+ assert_equal(iter_indices(i),
+ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
+ # 3D Fortran-order
+ i = nditer(a.reshape(2, 3, 2).copy(order='F'),
+ ['c_index'], [['readonly']])
+ assert_equal(iter_indices(i),
+ [0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11])
+ # 3D reversed C-order
+ i = nditer(a.reshape(2, 3, 2)[::-1], ['c_index'], [['readonly']])
+ assert_equal(iter_indices(i),
+ [6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5])
+ i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['c_index'], [['readonly']])
+ assert_equal(iter_indices(i),
+ [4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7])
+ i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['c_index'], [['readonly']])
+ assert_equal(iter_indices(i),
+ [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10])
+ # 3D reversed Fortran-order
+ i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1],
+ ['c_index'], [['readonly']])
+ assert_equal(iter_indices(i),
+ [6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5])
+ i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1],
+ ['c_index'], [['readonly']])
+ assert_equal(iter_indices(i),
+ [4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7])
+ i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1],
+ ['c_index'], [['readonly']])
+ assert_equal(iter_indices(i),
+ [1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10])
+
+def test_iter_best_order_f_index_1d():
+ # The Fortran index should be correct with any reordering
+
+ a = arange(4)
+ # 1D order
+ i = nditer(a, ['f_index'], [['readonly']])
+ assert_equal(iter_indices(i), [0, 1, 2, 3])
+ # 1D reversed order
+ i = nditer(a[::-1], ['f_index'], [['readonly']])
+ assert_equal(iter_indices(i), [3, 2, 1, 0])
+
+def test_iter_best_order_f_index_2d():
+ # The Fortran index should be correct with any reordering
+
+ a = arange(6)
+ # 2D C-order
+ i = nditer(a.reshape(2, 3), ['f_index'], [['readonly']])
+ assert_equal(iter_indices(i), [0, 2, 4, 1, 3, 5])
+ # 2D Fortran-order
+ i = nditer(a.reshape(2, 3).copy(order='F'),
+ ['f_index'], [['readonly']])
+ assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5])
+ # 2D reversed C-order
+ i = nditer(a.reshape(2, 3)[::-1], ['f_index'], [['readonly']])
+ assert_equal(iter_indices(i), [1, 3, 5, 0, 2, 4])
+ i = nditer(a.reshape(2, 3)[:, ::-1], ['f_index'], [['readonly']])
+ assert_equal(iter_indices(i), [4, 2, 0, 5, 3, 1])
+ i = nditer(a.reshape(2, 3)[::-1, ::-1], ['f_index'], [['readonly']])
+ assert_equal(iter_indices(i), [5, 3, 1, 4, 2, 0])
+ # 2D reversed Fortran-order
+ i = nditer(a.reshape(2, 3).copy(order='F')[::-1],
+ ['f_index'], [['readonly']])
+ assert_equal(iter_indices(i), [1, 0, 3, 2, 5, 4])
+ i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1],
+ ['f_index'], [['readonly']])
+ assert_equal(iter_indices(i), [4, 5, 2, 3, 0, 1])
+ i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1],
+ ['f_index'], [['readonly']])
+ assert_equal(iter_indices(i), [5, 4, 3, 2, 1, 0])
+
+def test_iter_best_order_f_index_3d():
+ # The Fortran index should be correct with any reordering
+
+ a = arange(12)
+ # 3D C-order
+ i = nditer(a.reshape(2, 3, 2), ['f_index'], [['readonly']])
+ assert_equal(iter_indices(i),
+ [0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11])
+ # 3D Fortran-order
+ i = nditer(a.reshape(2, 3, 2).copy(order='F'),
+ ['f_index'], [['readonly']])
+ assert_equal(iter_indices(i),
+ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
+ # 3D reversed C-order
+ i = nditer(a.reshape(2, 3, 2)[::-1], ['f_index'], [['readonly']])
+ assert_equal(iter_indices(i),
+ [1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10])
+ i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['f_index'], [['readonly']])
+ assert_equal(iter_indices(i),
+ [4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7])
+ i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['f_index'], [['readonly']])
+ assert_equal(iter_indices(i),
+ [6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5])
+ # 3D reversed Fortran-order
+ i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1],
+ ['f_index'], [['readonly']])
+ assert_equal(iter_indices(i),
+ [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10])
+ i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1],
+ ['f_index'], [['readonly']])
+ assert_equal(iter_indices(i),
+ [4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7])
+ i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1],
+ ['f_index'], [['readonly']])
+ assert_equal(iter_indices(i),
+ [6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5])
+
+def test_iter_no_inner_full_coalesce():
+ # Check no_inner iterators which coalesce into a single inner loop
+
+ for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]:
+ size = np.prod(shape)
+ a = arange(size)
+ # Test each combination of forward and backwards indexing
+ for dirs in range(2**len(shape)):
+ dirs_index = [slice(None)]*len(shape)
+ for bit in range(len(shape)):
+ if ((2**bit) & dirs):
+ dirs_index[bit] = slice(None, None, -1)
+ dirs_index = tuple(dirs_index)
+
+ aview = a.reshape(shape)[dirs_index]
+ # C-order
+ i = nditer(aview, ['external_loop'], [['readonly']])
+ assert_equal(i.ndim, 1)
+ assert_equal(i[0].shape, (size,))
+ # Fortran-order
+ i = nditer(aview.T, ['external_loop'], [['readonly']])
+ assert_equal(i.ndim, 1)
+ assert_equal(i[0].shape, (size,))
+ # Other order
+ if len(shape) > 2:
+ i = nditer(aview.swapaxes(0, 1),
+ ['external_loop'], [['readonly']])
+ assert_equal(i.ndim, 1)
+ assert_equal(i[0].shape, (size,))
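+
+# A sketch of what coalescing buys (illustrative; the _demo_* helper is
+# ours, not from the upstream suite): 'external_loop' hands back whole
+# merged chunks, so a contiguous array arrives as a single 1-D block,
+# while a view that clips the inner axis can only be merged up to it.
+def _demo_external_loop_chunks():
+    arr = np.arange(24).reshape(2, 3, 4)
+    assert_equal([c.size for c in nditer(arr, ['external_loop'])], [24])
+    clipped = arr[:, :, :-1]   # inner axis loses its last element
+    assert_equal([c.size for c in nditer(clipped, ['external_loop'])], [3]*6)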
+
+def test_iter_no_inner_dim_coalescing():
+ # Check no_inner iterators whose dimensions may not coalesce completely
+
+ # Skipping the last element in a dimension prevents coalescing
+ # with the next-bigger dimension
+ a = arange(24).reshape(2, 3, 4)[:,:, :-1]
+ i = nditer(a, ['external_loop'], [['readonly']])
+ assert_equal(i.ndim, 2)
+ assert_equal(i[0].shape, (3,))
+ a = arange(24).reshape(2, 3, 4)[:, :-1,:]
+ i = nditer(a, ['external_loop'], [['readonly']])
+ assert_equal(i.ndim, 2)
+ assert_equal(i[0].shape, (8,))
+ a = arange(24).reshape(2, 3, 4)[:-1,:,:]
+ i = nditer(a, ['external_loop'], [['readonly']])
+ assert_equal(i.ndim, 1)
+ assert_equal(i[0].shape, (12,))
+
+ # Even with lots of 1-sized dimensions, should still coalesce
+ a = arange(24).reshape(1, 1, 2, 1, 1, 3, 1, 1, 4, 1, 1)
+ i = nditer(a, ['external_loop'], [['readonly']])
+ assert_equal(i.ndim, 1)
+ assert_equal(i[0].shape, (24,))
+
+def test_iter_dim_coalescing():
+ # Check that the correct number of dimensions are coalesced
+
+ # Tracking a multi-index disables coalescing
+ a = arange(24).reshape(2, 3, 4)
+ i = nditer(a, ['multi_index'], [['readonly']])
+ assert_equal(i.ndim, 3)
+
+ # A tracked index can allow coalescing if it's compatible with the array
+ a3d = arange(24).reshape(2, 3, 4)
+ i = nditer(a3d, ['c_index'], [['readonly']])
+ assert_equal(i.ndim, 1)
+ i = nditer(a3d.swapaxes(0, 1), ['c_index'], [['readonly']])
+ assert_equal(i.ndim, 3)
+ i = nditer(a3d.T, ['c_index'], [['readonly']])
+ assert_equal(i.ndim, 3)
+ i = nditer(a3d.T, ['f_index'], [['readonly']])
+ assert_equal(i.ndim, 1)
+ i = nditer(a3d.T.swapaxes(0, 1), ['f_index'], [['readonly']])
+ assert_equal(i.ndim, 3)
+
+ # When C or F order is forced, coalescing may still occur
+ a3d = arange(24).reshape(2, 3, 4)
+ i = nditer(a3d, order='C')
+ assert_equal(i.ndim, 1)
+ i = nditer(a3d.T, order='C')
+ assert_equal(i.ndim, 3)
+ i = nditer(a3d, order='F')
+ assert_equal(i.ndim, 3)
+ i = nditer(a3d.T, order='F')
+ assert_equal(i.ndim, 1)
+ i = nditer(a3d, order='A')
+ assert_equal(i.ndim, 1)
+ i = nditer(a3d.T, order='A')
+ assert_equal(i.ndim, 1)
+
+def test_iter_broadcasting():
+ # Standard NumPy broadcasting rules
+
+ # 1D with scalar
+ i = nditer([arange(6), np.int32(2)], ['multi_index'], [['readonly']]*2)
+ assert_equal(i.itersize, 6)
+ assert_equal(i.shape, (6,))
+
+ # 2D with scalar
+ i = nditer([arange(6).reshape(2, 3), np.int32(2)],
+ ['multi_index'], [['readonly']]*2)
+ assert_equal(i.itersize, 6)
+ assert_equal(i.shape, (2, 3))
+ # 2D with 1D
+ i = nditer([arange(6).reshape(2, 3), arange(3)],
+ ['multi_index'], [['readonly']]*2)
+ assert_equal(i.itersize, 6)
+ assert_equal(i.shape, (2, 3))
+ i = nditer([arange(2).reshape(2, 1), arange(3)],
+ ['multi_index'], [['readonly']]*2)
+ assert_equal(i.itersize, 6)
+ assert_equal(i.shape, (2, 3))
+ # 2D with 2D
+ i = nditer([arange(2).reshape(2, 1), arange(3).reshape(1, 3)],
+ ['multi_index'], [['readonly']]*2)
+ assert_equal(i.itersize, 6)
+ assert_equal(i.shape, (2, 3))
+
+ # 3D with scalar
+ i = nditer([np.int32(2), arange(24).reshape(4, 2, 3)],
+ ['multi_index'], [['readonly']]*2)
+ assert_equal(i.itersize, 24)
+ assert_equal(i.shape, (4, 2, 3))
+ # 3D with 1D
+ i = nditer([arange(3), arange(24).reshape(4, 2, 3)],
+ ['multi_index'], [['readonly']]*2)
+ assert_equal(i.itersize, 24)
+ assert_equal(i.shape, (4, 2, 3))
+ i = nditer([arange(3), arange(8).reshape(4, 2, 1)],
+ ['multi_index'], [['readonly']]*2)
+ assert_equal(i.itersize, 24)
+ assert_equal(i.shape, (4, 2, 3))
+ # 3D with 2D
+ i = nditer([arange(6).reshape(2, 3), arange(24).reshape(4, 2, 3)],
+ ['multi_index'], [['readonly']]*2)
+ assert_equal(i.itersize, 24)
+ assert_equal(i.shape, (4, 2, 3))
+ i = nditer([arange(2).reshape(2, 1), arange(24).reshape(4, 2, 3)],
+ ['multi_index'], [['readonly']]*2)
+ assert_equal(i.itersize, 24)
+ assert_equal(i.shape, (4, 2, 3))
+ i = nditer([arange(3).reshape(1, 3), arange(8).reshape(4, 2, 1)],
+ ['multi_index'], [['readonly']]*2)
+ assert_equal(i.itersize, 24)
+ assert_equal(i.shape, (4, 2, 3))
+ # 3D with 3D
+ i = nditer([arange(2).reshape(1, 2, 1), arange(3).reshape(1, 1, 3),
+ arange(4).reshape(4, 1, 1)],
+ ['multi_index'], [['readonly']]*3)
+ assert_equal(i.itersize, 24)
+ assert_equal(i.shape, (4, 2, 3))
+ i = nditer([arange(6).reshape(1, 2, 3), arange(4).reshape(4, 1, 1)],
+ ['multi_index'], [['readonly']]*2)
+ assert_equal(i.itersize, 24)
+ assert_equal(i.shape, (4, 2, 3))
+ i = nditer([arange(24).reshape(4, 2, 3), arange(12).reshape(4, 1, 3)],
+ ['multi_index'], [['readonly']]*2)
+ assert_equal(i.itersize, 24)
+ assert_equal(i.shape, (4, 2, 3))
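+
+# A hand-rolled outer sum (illustrative; the _demo_* helper is ours, not
+# from the upstream suite): the iterator pairs up elements of the
+# broadcast shapes above exactly as col + row would.
+def _demo_broadcast_outer_sum():
+    col = arange(2).reshape(2, 1)
+    row = arange(3)
+    sums = [int(x) + int(y) for x, y in nditer([col, row])]
+    assert_equal(sums, (col + row).ravel())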
+
+def test_iter_itershape():
+ # Check that allocated outputs work with a specified shape
+ a = np.arange(6, dtype='i2').reshape(2, 3)
+ i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']],
+ op_axes=[[0, 1, None], None],
+ itershape=(-1, -1, 4))
+ assert_equal(i.operands[1].shape, (2, 3, 4))
+ assert_equal(i.operands[1].strides, (24, 8, 2))
+
+ i = nditer([a.T, None], [], [['readonly'], ['writeonly', 'allocate']],
+ op_axes=[[0, 1, None], None],
+ itershape=(-1, -1, 4))
+ assert_equal(i.operands[1].shape, (3, 2, 4))
+ assert_equal(i.operands[1].strides, (8, 24, 2))
+
+ i = nditer([a.T, None], [], [['readonly'], ['writeonly', 'allocate']],
+ order='F',
+ op_axes=[[0, 1, None], None],
+ itershape=(-1, -1, 4))
+ assert_equal(i.operands[1].shape, (3, 2, 4))
+ assert_equal(i.operands[1].strides, (2, 6, 12))
+
+ # If we specify 1 in the itershape, it shouldn't allow broadcasting
+ # of that dimension to a bigger value
+ assert_raises(ValueError, nditer, [a, None], [],
+ [['readonly'], ['writeonly', 'allocate']],
+ op_axes=[[0, 1, None], None],
+ itershape=(-1, 1, 4))
+ # Test bug that for no op_axes but itershape, they are NULLed correctly
+ i = np.nditer([np.ones(2), None, None], itershape=(2,))
+
+def test_iter_broadcasting_errors():
+ # Check that errors are thrown for bad broadcasting shapes
+
+ # 1D with 1D
+ assert_raises(ValueError, nditer, [arange(2), arange(3)],
+ [], [['readonly']]*2)
+ # 2D with 1D
+ assert_raises(ValueError, nditer,
+ [arange(6).reshape(2, 3), arange(2)],
+ [], [['readonly']]*2)
+ # 2D with 2D
+ assert_raises(ValueError, nditer,
+ [arange(6).reshape(2, 3), arange(9).reshape(3, 3)],
+ [], [['readonly']]*2)
+ assert_raises(ValueError, nditer,
+ [arange(6).reshape(2, 3), arange(4).reshape(2, 2)],
+ [], [['readonly']]*2)
+ # 3D with 3D
+ assert_raises(ValueError, nditer,
+ [arange(36).reshape(3, 3, 4), arange(24).reshape(2, 3, 4)],
+ [], [['readonly']]*2)
+ assert_raises(ValueError, nditer,
+ [arange(8).reshape(2, 4, 1), arange(24).reshape(2, 3, 4)],
+ [], [['readonly']]*2)
+
+ # Verify that the error message mentions the right shapes
+ try:
+ nditer([arange(2).reshape(1, 2, 1),
+ arange(3).reshape(1, 3),
+ arange(6).reshape(2, 3)],
+ [],
+ [['readonly'], ['readonly'], ['writeonly', 'no_broadcast']])
+ raise AssertionError('Should have raised a broadcast error')
+ except ValueError as e:
+ msg = str(e)
+ # The message should contain the shape of the 3rd operand
+ assert_(msg.find('(2,3)') >= 0,
+ 'Message "%s" doesn\'t contain operand shape (2,3)' % msg)
+ # The message should contain the broadcast shape
+ assert_(msg.find('(1,2,3)') >= 0,
+ 'Message "%s" doesn\'t contain broadcast shape (1,2,3)' % msg)
+
+ try:
+ nditer([arange(6).reshape(2, 3), arange(2)],
+ [],
+ [['readonly'], ['readonly']],
+ op_axes=[[0, 1], [0, np.newaxis]],
+ itershape=(4, 3))
+ raise AssertionError('Should have raised a broadcast error')
+ except ValueError as e:
+ msg = str(e)
+ # The message should contain "shape->remappedshape" for each operand
+ assert_(msg.find('(2,3)->(2,3)') >= 0,
+ 'Message "%s" doesn\'t contain operand shape (2,3)->(2,3)' % msg)
+ assert_(msg.find('(2,)->(2,newaxis)') >= 0,
+ ('Message "%s" doesn\'t contain remapped operand shape' +
+ '(2,)->(2,newaxis)') % msg)
+ # The message should contain the itershape parameter
+ assert_(msg.find('(4,3)') >= 0,
+ 'Message "%s" doesn\'t contain itershape parameter (4,3)' % msg)
+
+ try:
+ nditer([np.zeros((2, 1, 1)), np.zeros((2,))],
+ [],
+ [['writeonly', 'no_broadcast'], ['readonly']])
+ raise AssertionError('Should have raised a broadcast error')
+ except ValueError as e:
+ msg = str(e)
+ # The message should contain the shape of the bad operand
+ assert_(msg.find('(2,1,1)') >= 0,
+ 'Message "%s" doesn\'t contain operand shape (2,1,1)' % msg)
+ # The message should contain the broadcast shape
+ assert_(msg.find('(2,1,2)') >= 0,
+ 'Message "%s" doesn\'t contain the broadcast shape (2,1,2)' % msg)
+
+def test_iter_flags_errors():
+ # Check that bad combinations of flags produce errors
+
+ a = arange(6)
+
+ # Not enough operands
+ assert_raises(ValueError, nditer, [], [], [])
+ # Too many operands
+ assert_raises(ValueError, nditer, [a]*100, [], [['readonly']]*100)
+ # Bad global flag
+ assert_raises(ValueError, nditer, [a], ['bad flag'], [['readonly']])
+ # Bad op flag
+ assert_raises(ValueError, nditer, [a], [], [['readonly', 'bad flag']])
+ # Bad order parameter
+ assert_raises(ValueError, nditer, [a], [], [['readonly']], order='G')
+ # Bad casting parameter
+ assert_raises(ValueError, nditer, [a], [], [['readonly']], casting='noon')
+ # op_flags must match ops
+ assert_raises(ValueError, nditer, [a]*3, [], [['readonly']]*2)
+ # Cannot track both a C and an F index
+ assert_raises(ValueError, nditer, a,
+ ['c_index', 'f_index'], [['readonly']])
+ # Inner iteration and multi-indices/indices are incompatible
+ assert_raises(ValueError, nditer, a,
+ ['external_loop', 'multi_index'], [['readonly']])
+ assert_raises(ValueError, nditer, a,
+ ['external_loop', 'c_index'], [['readonly']])
+ assert_raises(ValueError, nditer, a,
+ ['external_loop', 'f_index'], [['readonly']])
+ # Must specify exactly one of readwrite/readonly/writeonly per operand
+ assert_raises(ValueError, nditer, a, [], [[]])
+ assert_raises(ValueError, nditer, a, [], [['readonly', 'writeonly']])
+ assert_raises(ValueError, nditer, a, [], [['readonly', 'readwrite']])
+ assert_raises(ValueError, nditer, a, [], [['writeonly', 'readwrite']])
+ assert_raises(ValueError, nditer, a,
+ [], [['readonly', 'writeonly', 'readwrite']])
+ # Python scalars are always readonly
+ assert_raises(TypeError, nditer, 1.5, [], [['writeonly']])
+ assert_raises(TypeError, nditer, 1.5, [], [['readwrite']])
+ # Array scalars are always readonly
+ assert_raises(TypeError, nditer, np.int32(1), [], [['writeonly']])
+ assert_raises(TypeError, nditer, np.int32(1), [], [['readwrite']])
+ # Check readonly array
+ a.flags.writeable = False
+ assert_raises(ValueError, nditer, a, [], [['writeonly']])
+ assert_raises(ValueError, nditer, a, [], [['readwrite']])
+ a.flags.writeable = True
+ # Multi-indices available only with the multi_index flag
+ i = nditer(arange(6), [], [['readonly']])
+ assert_raises(ValueError, lambda i:i.multi_index, i)
+ # Index available only with an index flag
+ assert_raises(ValueError, lambda i:i.index, i)
+ # GotoCoords and GotoIndex incompatible with buffering or no_inner
+
+ def assign_multi_index(i):
+ i.multi_index = (0,)
+
+ def assign_index(i):
+ i.index = 0
+
+ def assign_iterindex(i):
+ i.iterindex = 0
+
+ def assign_iterrange(i):
+ i.iterrange = (0, 1)
+ i = nditer(arange(6), ['external_loop'])
+ assert_raises(ValueError, assign_multi_index, i)
+ assert_raises(ValueError, assign_index, i)
+ assert_raises(ValueError, assign_iterindex, i)
+ assert_raises(ValueError, assign_iterrange, i)
+ i = nditer(arange(6), ['buffered'])
+ assert_raises(ValueError, assign_multi_index, i)
+ assert_raises(ValueError, assign_index, i)
+ assert_raises(ValueError, assign_iterrange, i)
+ # Can't iterate if size is zero
+ assert_raises(ValueError, nditer, np.array([]))
+
+def test_iter_slice():
+ a, b, c = np.arange(3), np.arange(3), np.arange(3.)
+ i = nditer([a, b, c], [], ['readwrite'])
+ with i:
+ i[0:2] = (3, 3)
+ assert_equal(a, [3, 1, 2])
+ assert_equal(b, [3, 1, 2])
+ assert_equal(c, [0, 1, 2])
+ i[1] = 12
+ assert_equal(i[0:2], [3, 12])
+
+def test_iter_assign_mapping():
+ a = np.arange(24, dtype='f8').reshape(2, 3, 4).T
+ it = np.nditer(a, [], [['readwrite', 'updateifcopy']],
+ casting='same_kind', op_dtypes=[np.dtype('f4')])
+ with it:
+ it.operands[0][...] = 3
+ it.operands[0][...] = 14
+ assert_equal(a, 14)
+ it = np.nditer(a, [], [['readwrite', 'updateifcopy']],
+ casting='same_kind', op_dtypes=[np.dtype('f4')])
+ with it:
+ x = it.operands[0][-1:1]
+ x[...] = 14
+ it.operands[0][...] = -1234
+ assert_equal(a, -1234)
+ # check for no warnings on dealloc
+ x = None
+ it = None
+
+def test_iter_nbo_align_contig():
+ # Check that byte order, alignment, and contig changes work
+
+ # Byte order change by requesting a specific dtype
+ a = np.arange(6, dtype='f4')
+ au = a.byteswap().newbyteorder()
+ assert_(a.dtype.byteorder != au.dtype.byteorder)
+ i = nditer(au, [], [['readwrite', 'updateifcopy']],
+ casting='equiv',
+ op_dtypes=[np.dtype('f4')])
+ with i:
+ # context manager triggers UPDATEIFCOPY on i at exit
+ assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder)
+ assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder)
+ assert_equal(i.operands[0], a)
+ i.operands[0][:] = 2
+ assert_equal(au, [2]*6)
+ del i # should not raise a warning
+ # Byte order change by requesting NBO
+ a = np.arange(6, dtype='f4')
+ au = a.byteswap().newbyteorder()
+ assert_(a.dtype.byteorder != au.dtype.byteorder)
+ with nditer(au, [], [['readwrite', 'updateifcopy', 'nbo']],
+ casting='equiv') as i:
+ # context manager triggers UPDATEIFCOPY on i at exit
+ assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder)
+ assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder)
+ assert_equal(i.operands[0], a)
+ i.operands[0][:] = 12345
+ i.operands[0][:] = 2
+ assert_equal(au, [2]*6)
+
+ # Unaligned input
+ a = np.zeros((6*4+1,), dtype='i1')[1:]
+ a.dtype = 'f4'
+ a[:] = np.arange(6, dtype='f4')
+ assert_(not a.flags.aligned)
+ # Without 'aligned', shouldn't copy
+ i = nditer(a, [], [['readonly']])
+ assert_(not i.operands[0].flags.aligned)
+ assert_equal(i.operands[0], a)
+ # With 'aligned', should make a copy
+ with nditer(a, [], [['readwrite', 'updateifcopy', 'aligned']]) as i:
+ assert_(i.operands[0].flags.aligned)
+ # context manager triggers UPDATEIFCOPY on i at exit
+ assert_equal(i.operands[0], a)
+ i.operands[0][:] = 3
+ assert_equal(a, [3]*6)
+
+ # Discontiguous input
+ a = arange(12)
+ # If it is contiguous, shouldn't copy
+ i = nditer(a[:6], [], [['readonly']])
+ assert_(i.operands[0].flags.contiguous)
+ assert_equal(i.operands[0], a[:6])
+ # If it isn't contiguous, should buffer
+ i = nditer(a[::2], ['buffered', 'external_loop'],
+ [['readonly', 'contig']],
+ buffersize=10)
+ assert_(i[0].flags.contiguous)
+ assert_equal(i[0], a[::2])
+
+def test_iter_array_cast():
+ # Check that arrays are cast as requested
+
+ # No cast 'f4' -> 'f4'
+ a = np.arange(6, dtype='f4').reshape(2, 3)
+ i = nditer(a, [], [['readwrite']], op_dtypes=[np.dtype('f4')])
+ with i:
+ assert_equal(i.operands[0], a)
+ assert_equal(i.operands[0].dtype, np.dtype('f4'))
+
+    # Byte-order cast '<f4' -> '>f4'
+    a = np.arange(6, dtype='<f4').reshape(2, 3)
+    with nditer(a, [], [['readwrite', 'updateifcopy']],
+                casting='equiv',
+                op_dtypes=[np.dtype('>f4')]) as i:
+ assert_equal(i.operands[0], a)
+ assert_equal(i.operands[0].dtype, np.dtype('>f4'))
+
+ # Safe case 'f4' -> 'f8'
+ a = np.arange(24, dtype='f4').reshape(2, 3, 4).swapaxes(1, 2)
+ i = nditer(a, [], [['readonly', 'copy']],
+ casting='safe',
+ op_dtypes=[np.dtype('f8')])
+ assert_equal(i.operands[0], a)
+ assert_equal(i.operands[0].dtype, np.dtype('f8'))
+ # The memory layout of the temporary should match a (a is (48,4,16))
+ # except negative strides get flipped to positive strides.
+ assert_equal(i.operands[0].strides, (96, 8, 32))
+ a = a[::-1,:, ::-1]
+ i = nditer(a, [], [['readonly', 'copy']],
+ casting='safe',
+ op_dtypes=[np.dtype('f8')])
+ assert_equal(i.operands[0], a)
+ assert_equal(i.operands[0].dtype, np.dtype('f8'))
+ assert_equal(i.operands[0].strides, (96, 8, 32))
+
+ # Same-kind cast 'f8' -> 'f4' -> 'f8'
+ a = np.arange(24, dtype='f8').reshape(2, 3, 4).T
+ with nditer(a, [],
+ [['readwrite', 'updateifcopy']],
+ casting='same_kind',
+ op_dtypes=[np.dtype('f4')]) as i:
+ assert_equal(i.operands[0], a)
+ assert_equal(i.operands[0].dtype, np.dtype('f4'))
+ assert_equal(i.operands[0].strides, (4, 16, 48))
+ # Check that WRITEBACKIFCOPY is activated at exit
+ i.operands[0][2, 1, 1] = -12.5
+ assert_(a[2, 1, 1] != -12.5)
+ assert_equal(a[2, 1, 1], -12.5)
+
+ a = np.arange(6, dtype='i4')[::-2]
+ with nditer(a, [],
+ [['writeonly', 'updateifcopy']],
+ casting='unsafe',
+ op_dtypes=[np.dtype('f4')]) as i:
+ assert_equal(i.operands[0].dtype, np.dtype('f4'))
+ # Even though the stride was negative in 'a', it
+ # becomes positive in the temporary
+ assert_equal(i.operands[0].strides, (4,))
+ i.operands[0][:] = [1, 2, 3]
+ assert_equal(a, [1, 2, 3])
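+
+# The writeback pattern exercised above, reduced to a sketch (illustrative;
+# the _demo_* helper is ours, not from the upstream suite): writes go to a
+# casted temporary and only land in the original array when the context
+# manager exits.
+def _demo_writeback_on_exit():
+    a = np.zeros(3, dtype='f8')
+    with nditer(a, [], [['readwrite', 'updateifcopy']],
+                casting='same_kind', op_dtypes=[np.dtype('f4')]) as it:
+        it.operands[0][:] = 7
+    assert_equal(a, [7, 7, 7])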
+
+def test_iter_array_cast_errors():
+ # Check that invalid casts are caught
+
+ # Need to enable copying for casts to occur
+ assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
+ [['readonly']], op_dtypes=[np.dtype('f8')])
+ # Also need to allow casting for casts to occur
+ assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
+ [['readonly', 'copy']], casting='no',
+ op_dtypes=[np.dtype('f8')])
+ assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
+ [['readonly', 'copy']], casting='equiv',
+ op_dtypes=[np.dtype('f8')])
+ assert_raises(TypeError, nditer, arange(2, dtype='f8'), [],
+ [['writeonly', 'updateifcopy']],
+ casting='no',
+ op_dtypes=[np.dtype('f4')])
+ assert_raises(TypeError, nditer, arange(2, dtype='f8'), [],
+ [['writeonly', 'updateifcopy']],
+ casting='equiv',
+ op_dtypes=[np.dtype('f4')])
+    # '<f4' -> '>f4' should not work with casting='no'
+    assert_raises(TypeError, nditer, arange(2, dtype='<f4'), [],
+                  [['readonly', 'copy']], casting='no',
+                  op_dtypes=[np.dtype('>f4')])
+ # 'f4' -> 'f8' is a safe cast, but 'f8' -> 'f4' isn't
+ assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
+ [['readwrite', 'updateifcopy']],
+ casting='safe',
+ op_dtypes=[np.dtype('f8')])
+ assert_raises(TypeError, nditer, arange(2, dtype='f8'), [],
+ [['readwrite', 'updateifcopy']],
+ casting='safe',
+ op_dtypes=[np.dtype('f4')])
+ # 'f4' -> 'i4' is neither a safe nor a same-kind cast
+ assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
+ [['readonly', 'copy']],
+ casting='same_kind',
+ op_dtypes=[np.dtype('i4')])
+ assert_raises(TypeError, nditer, arange(2, dtype='i4'), [],
+ [['writeonly', 'updateifcopy']],
+ casting='same_kind',
+ op_dtypes=[np.dtype('f4')])
+
+def test_iter_scalar_cast():
+ # Check that scalars are cast as requested
+
+ # No cast 'f4' -> 'f4'
+ i = nditer(np.float32(2.5), [], [['readonly']],
+ op_dtypes=[np.dtype('f4')])
+ assert_equal(i.dtypes[0], np.dtype('f4'))
+ assert_equal(i.value.dtype, np.dtype('f4'))
+ assert_equal(i.value, 2.5)
+ # Safe cast 'f4' -> 'f8'
+ i = nditer(np.float32(2.5), [],
+ [['readonly', 'copy']],
+ casting='safe',
+ op_dtypes=[np.dtype('f8')])
+ assert_equal(i.dtypes[0], np.dtype('f8'))
+ assert_equal(i.value.dtype, np.dtype('f8'))
+ assert_equal(i.value, 2.5)
+ # Same-kind cast 'f8' -> 'f4'
+ i = nditer(np.float64(2.5), [],
+ [['readonly', 'copy']],
+ casting='same_kind',
+ op_dtypes=[np.dtype('f4')])
+ assert_equal(i.dtypes[0], np.dtype('f4'))
+ assert_equal(i.value.dtype, np.dtype('f4'))
+ assert_equal(i.value, 2.5)
+ # Unsafe cast 'f8' -> 'i4'
+ i = nditer(np.float64(3.0), [],
+ [['readonly', 'copy']],
+ casting='unsafe',
+ op_dtypes=[np.dtype('i4')])
+ assert_equal(i.dtypes[0], np.dtype('i4'))
+ assert_equal(i.value.dtype, np.dtype('i4'))
+ assert_equal(i.value, 3)
+ # Readonly scalars may be cast even without setting COPY or BUFFERED
+ i = nditer(3, [], [['readonly']], op_dtypes=[np.dtype('f8')])
+ assert_equal(i[0].dtype, np.dtype('f8'))
+ assert_equal(i[0], 3.)
+
+def test_iter_scalar_cast_errors():
+ # Check that invalid casts are caught
+
+ # Need to allow copying/buffering for write casts of scalars to occur
+ assert_raises(TypeError, nditer, np.float32(2), [],
+ [['readwrite']], op_dtypes=[np.dtype('f8')])
+ assert_raises(TypeError, nditer, 2.5, [],
+ [['readwrite']], op_dtypes=[np.dtype('f4')])
+ # 'f8' -> 'f4' isn't a safe cast if the value would overflow
+ assert_raises(TypeError, nditer, np.float64(1e60), [],
+ [['readonly']],
+ casting='safe',
+ op_dtypes=[np.dtype('f4')])
+ # 'f4' -> 'i4' is neither a safe nor a same-kind cast
+ assert_raises(TypeError, nditer, np.float32(2), [],
+ [['readonly']],
+ casting='same_kind',
+ op_dtypes=[np.dtype('i4')])
+
+def test_iter_object_arrays_basic():
+ # Check that object arrays work
+
+ obj = {'a':3,'b':'d'}
+ a = np.array([[1, 2, 3], None, obj, None], dtype='O')
+ if HAS_REFCOUNT:
+ rc = sys.getrefcount(obj)
+
+ # Need to allow references for object arrays
+ assert_raises(TypeError, nditer, a)
+ if HAS_REFCOUNT:
+ assert_equal(sys.getrefcount(obj), rc)
+
+ i = nditer(a, ['refs_ok'], ['readonly'])
+ vals = [x_[()] for x_ in i]
+ assert_equal(np.array(vals, dtype='O'), a)
+ vals, i, x = [None]*3
+ if HAS_REFCOUNT:
+ assert_equal(sys.getrefcount(obj), rc)
+
+ i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'],
+ ['readonly'], order='C')
+ assert_(i.iterationneedsapi)
+ vals = [x_[()] for x_ in i]
+ assert_equal(np.array(vals, dtype='O'), a.reshape(2, 2).ravel(order='F'))
+ vals, i, x = [None]*3
+ if HAS_REFCOUNT:
+ assert_equal(sys.getrefcount(obj), rc)
+
+ i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'],
+ ['readwrite'], order='C')
+ with i:
+ for x in i:
+ x[...] = None
+ vals, i, x = [None]*3
+ if HAS_REFCOUNT:
+ assert_(sys.getrefcount(obj) == rc-1)
+ assert_equal(a, np.array([None]*4, dtype='O'))
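+
+# The refusal above in its smallest form (illustrative; the _demo_* helper
+# is ours, not from the upstream suite): object arrays hold references, so
+# iterating them without 'refs_ok' raises TypeError.
+def _demo_refs_ok_required():
+    a = np.array([None, {'k': 1}], dtype='O')
+    assert_raises(TypeError, nditer, a)
+    assert_equal([x_[()] for x_ in nditer(a, ['refs_ok'])], list(a))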
+
+def test_iter_object_arrays_conversions():
+ # Conversions to/from objects
+ a = np.arange(6, dtype='O')
+ i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
+ casting='unsafe', op_dtypes='i4')
+ with i:
+ for x in i:
+ x[...] += 1
+ assert_equal(a, np.arange(6)+1)
+
+ a = np.arange(6, dtype='i4')
+ i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
+ casting='unsafe', op_dtypes='O')
+ with i:
+ for x in i:
+ x[...] += 1
+ assert_equal(a, np.arange(6)+1)
+
+ # Non-contiguous object array
+ a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'O')])
+ a = a['a']
+ a[:] = np.arange(6)
+ i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
+ casting='unsafe', op_dtypes='i4')
+ with i:
+ for x in i:
+ x[...] += 1
+ assert_equal(a, np.arange(6)+1)
+
+ # Non-contiguous value array
+ a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'i4')])
+ a = a['a']
+ a[:] = np.arange(6) + 98172488
+ i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
+ casting='unsafe', op_dtypes='O')
+ with i:
+ ob = i[0][()]
+ if HAS_REFCOUNT:
+ rc = sys.getrefcount(ob)
+ for x in i:
+ x[...] += 1
+ if HAS_REFCOUNT:
+ assert_(sys.getrefcount(ob) == rc-1)
+ assert_equal(a, np.arange(6)+98172489)
+
+def test_iter_common_dtype():
+ # Check that the iterator finds a common data type correctly
+
+ i = nditer([array([3], dtype='f4'), array([0], dtype='f8')],
+ ['common_dtype'],
+ [['readonly', 'copy']]*2,
+ casting='safe')
+ assert_equal(i.dtypes[0], np.dtype('f8'))
+ assert_equal(i.dtypes[1], np.dtype('f8'))
+ i = nditer([array([3], dtype='i4'), array([0], dtype='f4')],
+ ['common_dtype'],
+ [['readonly', 'copy']]*2,
+ casting='safe')
+ assert_equal(i.dtypes[0], np.dtype('f8'))
+ assert_equal(i.dtypes[1], np.dtype('f8'))
+ i = nditer([array([3], dtype='f4'), array(0, dtype='f8')],
+ ['common_dtype'],
+ [['readonly', 'copy']]*2,
+ casting='same_kind')
+ assert_equal(i.dtypes[0], np.dtype('f4'))
+ assert_equal(i.dtypes[1], np.dtype('f4'))
+ i = nditer([array([3], dtype='u4'), array(0, dtype='i4')],
+ ['common_dtype'],
+ [['readonly', 'copy']]*2,
+ casting='safe')
+ assert_equal(i.dtypes[0], np.dtype('u4'))
+ assert_equal(i.dtypes[1], np.dtype('u4'))
+ i = nditer([array([3], dtype='u4'), array(-12, dtype='i4')],
+ ['common_dtype'],
+ [['readonly', 'copy']]*2,
+ casting='safe')
+ assert_equal(i.dtypes[0], np.dtype('i8'))
+ assert_equal(i.dtypes[1], np.dtype('i8'))
+ i = nditer([array([3], dtype='u4'), array(-12, dtype='i4'),
+ array([2j], dtype='c8'), array([9], dtype='f8')],
+ ['common_dtype'],
+ [['readonly', 'copy']]*4,
+ casting='safe')
+ assert_equal(i.dtypes[0], np.dtype('c16'))
+ assert_equal(i.dtypes[1], np.dtype('c16'))
+ assert_equal(i.dtypes[2], np.dtype('c16'))
+ assert_equal(i.dtypes[3], np.dtype('c16'))
+ assert_equal(i.value, (3, -12, 2j, 9))
+
+ # When allocating outputs, other outputs aren't factored in
+ i = nditer([array([3], dtype='i4'), None, array([2j], dtype='c16')], [],
+ [['readonly', 'copy'],
+ ['writeonly', 'allocate'],
+ ['writeonly']],
+ casting='safe')
+ assert_equal(i.dtypes[0], np.dtype('i4'))
+ assert_equal(i.dtypes[1], np.dtype('i4'))
+ assert_equal(i.dtypes[2], np.dtype('c16'))
+ # But, if common data types are requested, they are
+ i = nditer([array([3], dtype='i4'), None, array([2j], dtype='c16')],
+ ['common_dtype'],
+ [['readonly', 'copy'],
+ ['writeonly', 'allocate'],
+ ['writeonly']],
+ casting='safe')
+ assert_equal(i.dtypes[0], np.dtype('c16'))
+ assert_equal(i.dtypes[1], np.dtype('c16'))
+ assert_equal(i.dtypes[2], np.dtype('c16'))
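+
+# The promotions above follow the same rules as np.result_type, so the
+# iterator's common dtype can be sanity-checked against it (illustrative;
+# the _demo_* helper is ours, not from the upstream suite).
+def _demo_common_dtype_matches_result_type():
+    ops = [array([3], dtype='u4'), array(-12, dtype='i4')]
+    it = nditer(ops, ['common_dtype'], [['readonly', 'copy']]*2,
+                casting='safe')
+    assert_equal(it.dtypes[0], np.result_type(*ops))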
+
+def test_iter_copy_if_overlap():
+ # Ensure the iterator makes copies on read/write overlap, if requested
+
+ # Copy not needed, 1 op
+ for flag in ['readonly', 'writeonly', 'readwrite']:
+ a = arange(10)
+ i = nditer([a], ['copy_if_overlap'], [[flag]])
+ with i:
+ assert_(i.operands[0] is a)
+
+ # Copy needed, 2 ops, read-write overlap
+ x = arange(10)
+ a = x[1:]
+ b = x[:-1]
+ with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i:
+ assert_(not np.shares_memory(*i.operands))
+
+ # Copy not needed with elementwise, 2 ops, exactly same arrays
+ x = arange(10)
+ a = x
+ b = x
+ i = nditer([a, b], ['copy_if_overlap'], [['readonly', 'overlap_assume_elementwise'],
+ ['readwrite', 'overlap_assume_elementwise']])
+ with i:
+ assert_(i.operands[0] is a and i.operands[1] is b)
+ with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i:
+ assert_(i.operands[0] is a and not np.shares_memory(i.operands[1], b))
+
+ # Copy not needed, 2 ops, no overlap
+ x = arange(10)
+ a = x[::2]
+ b = x[1::2]
+ i = nditer([a, b], ['copy_if_overlap'], [['readonly'], ['writeonly']])
+ assert_(i.operands[0] is a and i.operands[1] is b)
+
+ # Copy needed, 2 ops, read-write overlap
+ x = arange(4, dtype=np.int8)
+ a = x[3:]
+ b = x.view(np.int32)[:1]
+ with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['writeonly']]) as i:
+ assert_(not np.shares_memory(*i.operands))
+
+ # Copy needed, 3 ops, read-write overlap
+ for flag in ['writeonly', 'readwrite']:
+ x = np.ones([10, 10])
+ a = x
+ b = x.T
+ c = x
+ with nditer([a, b, c], ['copy_if_overlap'],
+ [['readonly'], ['readonly'], [flag]]) as i:
+ a2, b2, c2 = i.operands
+ assert_(not np.shares_memory(a2, c2))
+ assert_(not np.shares_memory(b2, c2))
+
+ # Copy not needed, 3 ops, read-only overlap
+ x = np.ones([10, 10])
+ a = x
+ b = x.T
+ c = x
+ i = nditer([a, b, c], ['copy_if_overlap'],
+ [['readonly'], ['readonly'], ['readonly']])
+ a2, b2, c2 = i.operands
+ assert_(a is a2)
+ assert_(b is b2)
+ assert_(c is c2)
+
+ # Copy not needed, 3 ops, read-only overlap
+ x = np.ones([10, 10])
+ a = x
+ b = np.ones([10, 10])
+ c = x.T
+ i = nditer([a, b, c], ['copy_if_overlap'],
+ [['readonly'], ['writeonly'], ['readonly']])
+ a2, b2, c2 = i.operands
+ assert_(a is a2)
+ assert_(b is b2)
+ assert_(c is c2)
+
+ # Copy not needed, 3 ops, write-only overlap
+ x = np.arange(7)
+ a = x[:3]
+ b = x[3:6]
+ c = x[4:7]
+ i = nditer([a, b, c], ['copy_if_overlap'],
+ [['readonly'], ['writeonly'], ['writeonly']])
+ a2, b2, c2 = i.operands
+ assert_(a is a2)
+ assert_(b is b2)
+ assert_(c is c2)
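+
+# What 'copy_if_overlap' guards against, in two lines (illustrative; the
+# _demo_* helper is ours, not from the upstream suite): np.shares_memory
+# reports exactly the overlaps that force the copies above.
+def _demo_overlap_detection():
+    x = arange(10)
+    assert_(np.shares_memory(x[1:], x[:-1]))
+    assert_(not np.shares_memory(x[::2], x[1::2]))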
+
+def test_iter_op_axes():
+ # Check that custom axes work
+
+ # Reverse the axes
+ a = arange(6).reshape(2, 3)
+ i = nditer([a, a.T], [], [['readonly']]*2, op_axes=[[0, 1], [1, 0]])
+ assert_(all([x == y for (x, y) in i]))
+ a = arange(24).reshape(2, 3, 4)
+ i = nditer([a.T, a], [], [['readonly']]*2, op_axes=[[2, 1, 0], None])
+ assert_(all([x == y for (x, y) in i]))
+
+ # Broadcast 1D to any dimension
+ a = arange(1, 31).reshape(2, 3, 5)
+ b = arange(1, 3)
+ i = nditer([a, b], [], [['readonly']]*2, op_axes=[None, [0, -1, -1]])
+ assert_equal([x*y for (x, y) in i], (a*b.reshape(2, 1, 1)).ravel())
+ b = arange(1, 4)
+ i = nditer([a, b], [], [['readonly']]*2, op_axes=[None, [-1, 0, -1]])
+ assert_equal([x*y for (x, y) in i], (a*b.reshape(1, 3, 1)).ravel())
+ b = arange(1, 6)
+ i = nditer([a, b], [], [['readonly']]*2,
+ op_axes=[None, [np.newaxis, np.newaxis, 0]])
+ assert_equal([x*y for (x, y) in i], (a*b.reshape(1, 1, 5)).ravel())
+
+ # Inner product-style broadcasting
+ a = arange(24).reshape(2, 3, 4)
+ b = arange(40).reshape(5, 2, 4)
+ i = nditer([a, b], ['multi_index'], [['readonly']]*2,
+ op_axes=[[0, 1, -1, -1], [-1, -1, 0, 1]])
+ assert_equal(i.shape, (2, 3, 5, 2))
+
+ # Matrix product-style broadcasting
+ a = arange(12).reshape(3, 4)
+ b = arange(20).reshape(4, 5)
+ i = nditer([a, b], ['multi_index'], [['readonly']]*2,
+ op_axes=[[0, -1], [-1, 1]])
+ assert_equal(i.shape, (3, 5))
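+
+# A concrete reading of the op_axes remapping above (illustrative; the
+# _demo_* helper is ours, not from the upstream suite): mapping each
+# operand onto one axis of the (3, 5) result grid and multiplying the
+# paired elements reproduces np.outer.
+def _demo_op_axes_outer():
+    u = arange(3)
+    v = arange(5)
+    it = nditer([u, v], ['multi_index'], [['readonly']]*2,
+                op_axes=[[0, -1], [-1, 0]])
+    out = np.empty(it.shape)
+    for x, y in it:
+        out[it.multi_index] = x * y
+    assert_equal(out, np.outer(u, v))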
+
+def test_iter_op_axes_errors():
+ # Check that custom axes throws errors for bad inputs
+
+ # Wrong number of items in op_axes
+ a = arange(6).reshape(2, 3)
+ assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
+ op_axes=[[0], [1], [0]])
+ # Out of bounds items in op_axes
+ assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
+ op_axes=[[2, 1], [0, 1]])
+ assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
+ op_axes=[[0, 1], [2, -1]])
+ # Duplicate items in op_axes
+ assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
+ op_axes=[[0, 0], [0, 1]])
+ assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
+ op_axes=[[0, 1], [1, 1]])
+
+ # Different sized arrays in op_axes
+ assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
+ op_axes=[[0, 1], [0, 1, 0]])
+
+ # Non-broadcastable dimensions in the result
+ assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
+ op_axes=[[0, 1], [1, 0]])
+
+def test_iter_copy():
+ # Check that copying the iterator works correctly
+ a = arange(24).reshape(2, 3, 4)
+
+ # Simple iterator
+ i = nditer(a)
+ j = i.copy()
+ assert_equal([x[()] for x in i], [x[()] for x in j])
+
+ i.iterindex = 3
+ j = i.copy()
+ assert_equal([x[()] for x in i], [x[()] for x in j])
+
+ # Buffered iterator
+ i = nditer(a, ['buffered', 'ranged'], order='F', buffersize=3)
+ j = i.copy()
+ assert_equal([x[()] for x in i], [x[()] for x in j])
+
+ i.iterindex = 3
+ j = i.copy()
+ assert_equal([x[()] for x in i], [x[()] for x in j])
+
+ i.iterrange = (3, 9)
+ j = i.copy()
+ assert_equal([x[()] for x in i], [x[()] for x in j])
+
+ i.iterrange = (2, 18)
+ next(i)
+ next(i)
+ j = i.copy()
+ assert_equal([x[()] for x in i], [x[()] for x in j])
+
+ # Casting iterator
+ with nditer(a, ['buffered'], order='F', casting='unsafe',
+ op_dtypes='f8', buffersize=5) as i:
+ j = i.copy()
+ assert_equal([x[()] for x in j], a.ravel(order='F'))
+
+    a = arange(24, dtype='<f4')
+    with nditer(a, ['buffered'], order='F', casting='unsafe',
+                op_dtypes='>f8', buffersize=5) as i:
+        j = i.copy()
+    assert_equal([x[()] for x in j], a.ravel(order='F'))
+
+def test_iter_buffered_cast_byteswapped():
+    # Test that buffering can handle a cast which requires swap->cast->swap
+
+ a = np.arange(10, dtype='f4').newbyteorder().byteswap()
+ i = nditer(a, ['buffered', 'external_loop'],
+ [['readwrite', 'nbo', 'aligned']],
+ casting='same_kind',
+ op_dtypes=[np.dtype('f8').newbyteorder()],
+ buffersize=3)
+ with i:
+ for v in i:
+ v[...] *= 2
+
+ assert_equal(a, 2*np.arange(10, dtype='f4'))
+
+ with suppress_warnings() as sup:
+ sup.filter(np.ComplexWarning)
+
+ a = np.arange(10, dtype='f8').newbyteorder().byteswap()
+ i = nditer(a, ['buffered', 'external_loop'],
+ [['readwrite', 'nbo', 'aligned']],
+ casting='unsafe',
+ op_dtypes=[np.dtype('c8').newbyteorder()],
+ buffersize=3)
+ with i:
+ for v in i:
+ v[...] *= 2
+
+ assert_equal(a, 2*np.arange(10, dtype='f8'))
+
+def test_iter_buffered_cast_byteswapped_complex():
+ # Test that buffering can handle a cast which requires swap->cast->copy
+
+ a = np.arange(10, dtype='c8').newbyteorder().byteswap()
+ a += 2j
+ i = nditer(a, ['buffered', 'external_loop'],
+ [['readwrite', 'nbo', 'aligned']],
+ casting='same_kind',
+ op_dtypes=[np.dtype('c16')],
+ buffersize=3)
+ with i:
+ for v in i:
+ v[...] *= 2
+ assert_equal(a, 2*np.arange(10, dtype='c8') + 4j)
+
+ a = np.arange(10, dtype='c8')
+ a += 2j
+ i = nditer(a, ['buffered', 'external_loop'],
+ [['readwrite', 'nbo', 'aligned']],
+ casting='same_kind',
+ op_dtypes=[np.dtype('c16').newbyteorder()],
+ buffersize=3)
+ with i:
+ for v in i:
+ v[...] *= 2
+ assert_equal(a, 2*np.arange(10, dtype='c8') + 4j)
+
+ a = np.arange(10, dtype=np.clongdouble).newbyteorder().byteswap()
+ a += 2j
+ i = nditer(a, ['buffered', 'external_loop'],
+ [['readwrite', 'nbo', 'aligned']],
+ casting='same_kind',
+ op_dtypes=[np.dtype('c16')],
+ buffersize=3)
+ with i:
+ for v in i:
+ v[...] *= 2
+ assert_equal(a, 2*np.arange(10, dtype=np.clongdouble) + 4j)
+
+ a = np.arange(10, dtype=np.longdouble).newbyteorder().byteswap()
+ i = nditer(a, ['buffered', 'external_loop'],
+ [['readwrite', 'nbo', 'aligned']],
+ casting='same_kind',
+ op_dtypes=[np.dtype('f4')],
+ buffersize=7)
+ with i:
+ for v in i:
+ v[...] *= 2
+ assert_equal(a, 2*np.arange(10, dtype=np.longdouble))
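+
+# The setup idiom used throughout these byteswap tests, spelled out
+# (illustrative; the _demo_* helper is ours, not from the upstream suite):
+# byteswap() flips the bytes in memory and newbyteorder() flips the
+# dtype's declared order, so applying both keeps the values intact under
+# a non-native dtype.
+def _demo_nonnative_roundtrip():
+    a = np.arange(4, dtype='f4')
+    b = a.newbyteorder().byteswap()
+    assert_(a.dtype.byteorder != b.dtype.byteorder)
+    assert_equal(a, b)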
+
+def test_iter_buffered_cast_structured_type():
+ # Tests buffering of structured types
+
+ # simple -> struct type (duplicates the value)
+ sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')]
+ a = np.arange(3, dtype='f4') + 0.5
+ i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+ casting='unsafe',
+ op_dtypes=sdt)
+ vals = [np.array(x) for x in i]
+ assert_equal(vals[0]['a'], 0.5)
+ assert_equal(vals[0]['b'], 0)
+ assert_equal(vals[0]['c'], [[(0.5)]*3]*2)
+ assert_equal(vals[0]['d'], 0.5)
+ assert_equal(vals[1]['a'], 1.5)
+ assert_equal(vals[1]['b'], 1)
+ assert_equal(vals[1]['c'], [[(1.5)]*3]*2)
+ assert_equal(vals[1]['d'], 1.5)
+ assert_equal(vals[0].dtype, np.dtype(sdt))
+
+ # object -> struct type
+ sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')]
+ a = np.zeros((3,), dtype='O')
+ a[0] = (0.5, 0.5, [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]], 0.5)
+ a[1] = (1.5, 1.5, [[1.5, 1.5, 1.5], [1.5, 1.5, 1.5]], 1.5)
+ a[2] = (2.5, 2.5, [[2.5, 2.5, 2.5], [2.5, 2.5, 2.5]], 2.5)
+ if HAS_REFCOUNT:
+ rc = sys.getrefcount(a[0])
+ i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+ casting='unsafe',
+ op_dtypes=sdt)
+ vals = [x.copy() for x in i]
+ assert_equal(vals[0]['a'], 0.5)
+ assert_equal(vals[0]['b'], 0)
+ assert_equal(vals[0]['c'], [[(0.5)]*3]*2)
+ assert_equal(vals[0]['d'], 0.5)
+ assert_equal(vals[1]['a'], 1.5)
+ assert_equal(vals[1]['b'], 1)
+ assert_equal(vals[1]['c'], [[(1.5)]*3]*2)
+ assert_equal(vals[1]['d'], 1.5)
+ assert_equal(vals[0].dtype, np.dtype(sdt))
+ vals, i, x = [None]*3
+ if HAS_REFCOUNT:
+ assert_equal(sys.getrefcount(a[0]), rc)
+
+ # single-field struct type -> simple
+ sdt = [('a', 'f4')]
+ a = np.array([(5.5,), (8,)], dtype=sdt)
+ i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+ casting='unsafe',
+ op_dtypes='i4')
+ assert_equal([x_[()] for x_ in i], [5, 8])
+
+ # make sure multi-field struct type -> simple doesn't work
+ sdt = [('a', 'f4'), ('b', 'i8'), ('d', 'O')]
+ a = np.array([(5.5, 7, 'test'), (8, 10, 11)], dtype=sdt)
+ assert_raises(ValueError, lambda: (
+ nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+ casting='unsafe',
+ op_dtypes='i4')))
+
+ # struct type -> struct type (field-wise copy)
+ sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')]
+ sdt2 = [('d', 'u2'), ('a', 'O'), ('b', 'f8')]
+ a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1)
+ i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+ casting='unsafe',
+ op_dtypes=sdt2)
+ assert_equal(i[0].dtype, np.dtype(sdt2))
+ assert_equal([np.array(x_) for x_ in i],
+ [np.array((1, 2, 3), dtype=sdt2),
+ np.array((4, 5, 6), dtype=sdt2)])
+
+ # make sure struct type -> struct type with different
+ # number of fields fails
+ sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')]
+ sdt2 = [('b', 'O'), ('a', 'f8')]
+ a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1)
+
+ assert_raises(ValueError, lambda : (
+ nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
+ casting='unsafe',
+ op_dtypes=sdt2)))
+
+
+def test_iter_buffered_cast_subarray():
+ # Tests buffering of subarrays
+
+ # one element -> many (copies it to all)
+ sdt1 = [('a', 'f4')]
+ sdt2 = [('a', 'f8', (3, 2, 2))]
+ a = np.zeros((6,), dtype=sdt1)
+ a['a'] = np.arange(6)
+ i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+ casting='unsafe',
+ op_dtypes=sdt2)
+ assert_equal(i[0].dtype, np.dtype(sdt2))
+ for x, count in zip(i, list(range(6))):
+ assert_(np.all(x['a'] == count))
+
+ # one element -> many -> back (copies it to all)
+ sdt1 = [('a', 'O', (1, 1))]
+ sdt2 = [('a', 'O', (3, 2, 2))]
+ a = np.zeros((6,), dtype=sdt1)
+ a['a'][:, 0, 0] = np.arange(6)
+ i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
+ casting='unsafe',
+ op_dtypes=sdt2)
+ with i:
+ assert_equal(i[0].dtype, np.dtype(sdt2))
+ count = 0
+ for x in i:
+ assert_(np.all(x['a'] == count))
+ x['a'][0] += 2
+ count += 1
+ assert_equal(a['a'], np.arange(6).reshape(6, 1, 1)+2)
+
+ # many -> one element -> back (copies just element 0)
+ sdt1 = [('a', 'O', (3, 2, 2))]
+ sdt2 = [('a', 'O', (1,))]
+ a = np.zeros((6,), dtype=sdt1)
+ a['a'][:, 0, 0, 0] = np.arange(6)
+ i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
+ casting='unsafe',
+ op_dtypes=sdt2)
+ with i:
+ assert_equal(i[0].dtype, np.dtype(sdt2))
+ count = 0
+ for x in i:
+ assert_equal(x['a'], count)
+ x['a'] += 2
+ count += 1
+ assert_equal(a['a'], np.arange(6).reshape(6, 1, 1, 1)*np.ones((1, 3, 2, 2))+2)
+
+ # many -> one element -> back (copies just element 0)
+ sdt1 = [('a', 'f8', (3, 2, 2))]
+ sdt2 = [('a', 'O', (1,))]
+ a = np.zeros((6,), dtype=sdt1)
+ a['a'][:, 0, 0, 0] = np.arange(6)
+ i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+ casting='unsafe',
+ op_dtypes=sdt2)
+ assert_equal(i[0].dtype, np.dtype(sdt2))
+ count = 0
+ for x in i:
+ assert_equal(x['a'], count)
+ count += 1
+
+ # many -> one element (copies just element 0)
+ sdt1 = [('a', 'O', (3, 2, 2))]
+ sdt2 = [('a', 'f4', (1,))]
+ a = np.zeros((6,), dtype=sdt1)
+ a['a'][:, 0, 0, 0] = np.arange(6)
+ i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+ casting='unsafe',
+ op_dtypes=sdt2)
+ assert_equal(i[0].dtype, np.dtype(sdt2))
+ count = 0
+ for x in i:
+ assert_equal(x['a'], count)
+ count += 1
+
+ # many -> matching shape (straightforward copy)
+ sdt1 = [('a', 'O', (3, 2, 2))]
+ sdt2 = [('a', 'f4', (3, 2, 2))]
+ a = np.zeros((6,), dtype=sdt1)
+ a['a'] = np.arange(6*3*2*2).reshape(6, 3, 2, 2)
+ i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+ casting='unsafe',
+ op_dtypes=sdt2)
+ assert_equal(i[0].dtype, np.dtype(sdt2))
+ count = 0
+ for x in i:
+ assert_equal(x['a'], a[count]['a'])
+ count += 1
+
+ # vector -> smaller vector (truncates)
+ sdt1 = [('a', 'f8', (6,))]
+ sdt2 = [('a', 'f4', (2,))]
+ a = np.zeros((6,), dtype=sdt1)
+ a['a'] = np.arange(6*6).reshape(6, 6)
+ i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+ casting='unsafe',
+ op_dtypes=sdt2)
+ assert_equal(i[0].dtype, np.dtype(sdt2))
+ count = 0
+ for x in i:
+ assert_equal(x['a'], a[count]['a'][:2])
+ count += 1
+
+ # vector -> bigger vector (pads with zeros)
+ sdt1 = [('a', 'f8', (2,))]
+ sdt2 = [('a', 'f4', (6,))]
+ a = np.zeros((6,), dtype=sdt1)
+ a['a'] = np.arange(6*2).reshape(6, 2)
+ i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+ casting='unsafe',
+ op_dtypes=sdt2)
+ assert_equal(i[0].dtype, np.dtype(sdt2))
+ count = 0
+ for x in i:
+ assert_equal(x['a'][:2], a[count]['a'])
+ assert_equal(x['a'][2:], [0, 0, 0, 0])
+ count += 1
+
+ # vector -> matrix (broadcasts)
+ sdt1 = [('a', 'f8', (2,))]
+ sdt2 = [('a', 'f4', (2, 2))]
+ a = np.zeros((6,), dtype=sdt1)
+ a['a'] = np.arange(6*2).reshape(6, 2)
+ i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+ casting='unsafe',
+ op_dtypes=sdt2)
+ assert_equal(i[0].dtype, np.dtype(sdt2))
+ count = 0
+ for x in i:
+ assert_equal(x['a'][0], a[count]['a'])
+ assert_equal(x['a'][1], a[count]['a'])
+ count += 1
+
+ # vector -> matrix (broadcasts and zero-pads)
+ sdt1 = [('a', 'f8', (2, 1))]
+ sdt2 = [('a', 'f4', (3, 2))]
+ a = np.zeros((6,), dtype=sdt1)
+ a['a'] = np.arange(6*2).reshape(6, 2, 1)
+ i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+ casting='unsafe',
+ op_dtypes=sdt2)
+ assert_equal(i[0].dtype, np.dtype(sdt2))
+ count = 0
+ for x in i:
+ assert_equal(x['a'][:2, 0], a[count]['a'][:, 0])
+ assert_equal(x['a'][:2, 1], a[count]['a'][:, 0])
+ assert_equal(x['a'][2,:], [0, 0])
+ count += 1
+
+ # matrix -> matrix (truncates and zero-pads)
+ sdt1 = [('a', 'f8', (2, 3))]
+ sdt2 = [('a', 'f4', (3, 2))]
+ a = np.zeros((6,), dtype=sdt1)
+ a['a'] = np.arange(6*2*3).reshape(6, 2, 3)
+ i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+ casting='unsafe',
+ op_dtypes=sdt2)
+ assert_equal(i[0].dtype, np.dtype(sdt2))
+ count = 0
+ for x in i:
+ assert_equal(x['a'][:2, 0], a[count]['a'][:, 0])
+ assert_equal(x['a'][:2, 1], a[count]['a'][:, 1])
+ assert_equal(x['a'][2,:], [0, 0])
+ count += 1
+
+def test_iter_buffering_badwriteback():
+ # Writing back from a buffer cannot combine elements: a broadcast
+ # (stride-0) axis on a writable operand would make several buffer
+ # elements write back to the same location.
+
+ # a needs write buffering, but has a broadcast dimension
+ a = np.arange(6).reshape(2, 3, 1)
+ b = np.arange(12).reshape(2, 3, 2)
+ assert_raises(ValueError, nditer, [a, b],
+ ['buffered', 'external_loop'],
+ [['readwrite'], ['writeonly']],
+ order='C')
+
+ # But if a is readonly, it's fine
+ nditer([a, b], ['buffered', 'external_loop'],
+ [['readonly'], ['writeonly']],
+ order='C')
+
+ # If a has just one element, it's fine too (constant 0 stride, a reduction)
+ a = np.arange(1).reshape(1, 1, 1)
+ nditer([a, b], ['buffered', 'external_loop', 'reduce_ok'],
+ [['readwrite'], ['writeonly']],
+ order='C')
+
+ # check that it fails on other dimensions too
+ a = np.arange(6).reshape(1, 3, 2)
+ assert_raises(ValueError, nditer, [a, b],
+ ['buffered', 'external_loop'],
+ [['readwrite'], ['writeonly']],
+ order='C')
+ a = np.arange(4).reshape(2, 1, 2)
+ assert_raises(ValueError, nditer, [a, b],
+ ['buffered', 'external_loop'],
+ [['readwrite'], ['writeonly']],
+ order='C')
+
+def test_iter_buffering_string():
+ # Safe casting disallows shrinking strings
+ a = np.array(['abc', 'a', 'abcd'], dtype=np.bytes_)
+ assert_equal(a.dtype, np.dtype('S4'))
+ assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'],
+ op_dtypes='S2')
+ i = nditer(a, ['buffered'], ['readonly'], op_dtypes='S6')
+ assert_equal(i[0], b'abc')
+ assert_equal(i[0].dtype, np.dtype('S6'))
+
+ a = np.array(['abc', 'a', 'abcd'], dtype=np.unicode)
+ assert_equal(a.dtype, np.dtype('U4'))
+ assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'],
+ op_dtypes='U2')
+ i = nditer(a, ['buffered'], ['readonly'], op_dtypes='U6')
+ assert_equal(i[0], u'abc')
+ assert_equal(i[0].dtype, np.dtype('U6'))
+
+def test_iter_buffering_growinner():
+ # Test that the inner loop grows when no buffering is needed
+ a = np.arange(30)
+ i = nditer(a, ['buffered', 'growinner', 'external_loop'],
+ buffersize=5)
+ # Should end up with just one inner loop here
+ assert_equal(i[0].size, a.size)
+
+
+@pytest.mark.slow
+def test_iter_buffered_reduce_reuse():
+ # large enough array for all views, including negative strides.
+ a = np.arange(2*3**5)[3**5:3**5+1]
+ flags = ['buffered', 'delay_bufalloc', 'multi_index', 'reduce_ok', 'refs_ok']
+ op_flags = [('readonly',), ('readwrite', 'allocate')]
+ op_axes_list = [[(0, 1, 2), (0, 1, -1)], [(0, 1, 2), (0, -1, -1)]]
+ # wrong dtype to force buffering
+ op_dtypes = [float, a.dtype]
+
+ def get_params():
+ for xs in range(-3**2, 3**2 + 1):
+ for ys in range(xs, 3**2 + 1):
+ for op_axes in op_axes_list:
+ # the last stride is the inner, reduced one, so its exact
+ # value is not important for this test.
+ strides = (xs * a.itemsize, ys * a.itemsize, a.itemsize)
+ arr = np.lib.stride_tricks.as_strided(a, (3, 3, 3), strides)
+
+ for skip in [0, 1]:
+ yield arr, op_axes, skip
+
+ for arr, op_axes, skip in get_params():
+ nditer2 = np.nditer([arr.copy(), None],
+ op_axes=op_axes, flags=flags, op_flags=op_flags,
+ op_dtypes=op_dtypes)
+ with nditer2:
+ nditer2.operands[-1][...] = 0
+ nditer2.reset()
+ nditer2.iterindex = skip
+
+ for (a2_in, b2_in) in nditer2:
+ b2_in += a2_in.astype(np.int_)
+
+ comp_res = nditer2.operands[-1]
+
+ for bufsize in range(0, 3**3):
+ nditer1 = np.nditer([arr, None],
+ op_axes=op_axes, flags=flags, op_flags=op_flags,
+ buffersize=bufsize, op_dtypes=op_dtypes)
+ with nditer1:
+ nditer1.operands[-1][...] = 0
+ nditer1.reset()
+ nditer1.iterindex = skip
+
+ for (a1_in, b1_in) in nditer1:
+ b1_in += a1_in.astype(np.int_)
+
+ res = nditer1.operands[-1]
+ assert_array_equal(res, comp_res)
+
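+# Aside, a sketch (illustrative, not one of the tests): as_strided, used
+# above, builds views by spelling out the shape and strides directly. A zero
+# stride repeats a single element, which is also how broadcasting works.
+def _as_strided_sketch():
+    base = np.arange(5)
+    view = np.lib.stride_tricks.as_strided(base, shape=(3,), strides=(0,))
+    return view  # array([0, 0, 0]) -- three views of base[0]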
+
+def test_iter_no_broadcast():
+ # Test that the no_broadcast flag works
+ a = np.arange(24).reshape(2, 3, 4)
+ b = np.arange(6).reshape(2, 3, 1)
+ c = np.arange(12).reshape(3, 4)
+
+ nditer([a, b, c], [],
+ [['readonly', 'no_broadcast'],
+ ['readonly'], ['readonly']])
+ assert_raises(ValueError, nditer, [a, b, c], [],
+ [['readonly'], ['readonly', 'no_broadcast'], ['readonly']])
+ assert_raises(ValueError, nditer, [a, b, c], [],
+ [['readonly'], ['readonly'], ['readonly', 'no_broadcast']])
+
+
+class TestIterNested(object):
+
+ def test_basic(self):
+ # Test nested iteration basic usage
+ a = arange(12).reshape(2, 3, 2)
+
+ i, j = np.nested_iters(a, [[0], [1, 2]])
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]])
+
+ i, j = np.nested_iters(a, [[0, 1], [2]])
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]])
+
+ i, j = np.nested_iters(a, [[0, 2], [1]])
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
+
+ def test_reorder(self):
+ # Test nested iteration basic usage
+ a = arange(12).reshape(2, 3, 2)
+
+ # In 'K' order (default), it gets reordered
+ i, j = np.nested_iters(a, [[0], [2, 1]])
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]])
+
+ i, j = np.nested_iters(a, [[1, 0], [2]])
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]])
+
+ i, j = np.nested_iters(a, [[2, 0], [1]])
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
+
+ # In 'C' order, it doesn't
+ i, j = np.nested_iters(a, [[0], [2, 1]], order='C')
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0, 2, 4, 1, 3, 5], [6, 8, 10, 7, 9, 11]])
+
+ i, j = np.nested_iters(a, [[1, 0], [2]], order='C')
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0, 1], [6, 7], [2, 3], [8, 9], [4, 5], [10, 11]])
+
+ i, j = np.nested_iters(a, [[2, 0], [1]], order='C')
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0, 2, 4], [6, 8, 10], [1, 3, 5], [7, 9, 11]])
+
+ def test_flip_axes(self):
+ # Test nested iteration with negative axes
+ a = arange(12).reshape(2, 3, 2)[::-1, ::-1, ::-1]
+
+ # In 'K' order (default), the axes all get flipped
+ i, j = np.nested_iters(a, [[0], [1, 2]])
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]])
+
+ i, j = np.nested_iters(a, [[0, 1], [2]])
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]])
+
+ i, j = np.nested_iters(a, [[0, 2], [1]])
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
+
+ # In 'C' order, flipping axes is disabled
+ i, j = np.nested_iters(a, [[0], [1, 2]], order='C')
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[11, 10, 9, 8, 7, 6], [5, 4, 3, 2, 1, 0]])
+
+ i, j = np.nested_iters(a, [[0, 1], [2]], order='C')
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[11, 10], [9, 8], [7, 6], [5, 4], [3, 2], [1, 0]])
+
+ i, j = np.nested_iters(a, [[0, 2], [1]], order='C')
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[11, 9, 7], [10, 8, 6], [5, 3, 1], [4, 2, 0]])
+
+ def test_broadcast(self):
+ # Test nested iteration with broadcasting
+ a = arange(2).reshape(2, 1)
+ b = arange(3).reshape(1, 3)
+
+ i, j = np.nested_iters([a, b], [[0], [1]])
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[[0, 0], [0, 1], [0, 2]], [[1, 0], [1, 1], [1, 2]]])
+
+ i, j = np.nested_iters([a, b], [[1], [0]])
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[[0, 0], [1, 0]], [[0, 1], [1, 1]], [[0, 2], [1, 2]]])
+
+ def test_dtype_copy(self):
+ # Test nested iteration with a copy to change dtype
+
+ # copy
+ a = arange(6, dtype='i4').reshape(2, 3)
+ i, j = np.nested_iters(a, [[0], [1]],
+ op_flags=['readonly', 'copy'],
+ op_dtypes='f8')
+ assert_equal(j[0].dtype, np.dtype('f8'))
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0, 1, 2], [3, 4, 5]])
+ vals = None
+
+ # writebackifcopy - using context manager
+ a = arange(6, dtype='f4').reshape(2, 3)
+ i, j = np.nested_iters(a, [[0], [1]],
+ op_flags=['readwrite', 'updateifcopy'],
+ casting='same_kind',
+ op_dtypes='f8')
+ with i, j:
+ assert_equal(j[0].dtype, np.dtype('f8'))
+ for x in i:
+ for y in j:
+ y[...] += 1
+ assert_equal(a, [[0, 1, 2], [3, 4, 5]])
+ assert_equal(a, [[1, 2, 3], [4, 5, 6]])
+
+ # writebackifcopy - using close()
+ a = arange(6, dtype='f4').reshape(2, 3)
+ i, j = np.nested_iters(a, [[0], [1]],
+ op_flags=['readwrite', 'updateifcopy'],
+ casting='same_kind',
+ op_dtypes='f8')
+ assert_equal(j[0].dtype, np.dtype('f8'))
+ for x in i:
+ for y in j:
+ y[...] += 1
+ assert_equal(a, [[0, 1, 2], [3, 4, 5]])
+ i.close()
+ j.close()
+ assert_equal(a, [[1, 2, 3], [4, 5, 6]])
+
+ def test_dtype_buffered(self):
+ # Test nested iteration with buffering to change dtype
+
+ a = arange(6, dtype='f4').reshape(2, 3)
+ i, j = np.nested_iters(a, [[0], [1]],
+ flags=['buffered'],
+ op_flags=['readwrite'],
+ casting='same_kind',
+ op_dtypes='f8')
+ assert_equal(j[0].dtype, np.dtype('f8'))
+ for x in i:
+ for y in j:
+ y[...] += 1
+ assert_equal(a, [[1, 2, 3], [4, 5, 6]])
+
+ def test_0d(self):
+ a = np.arange(12).reshape(2, 3, 2)
+ i, j = np.nested_iters(a, [[], [1, 0, 2]])
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]])
+
+ i, j = np.nested_iters(a, [[1, 0, 2], []])
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11]])
+
+ i, j, k = np.nested_iters(a, [[2, 0], [], [1]])
+ vals = []
+ for x in i:
+ for y in j:
+ vals.append([z for z in k])
+ assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
+
+ def test_iter_nested_iters_dtype_buffered(self):
+ # Test nested iteration with buffering to change dtype
+
+ a = arange(6, dtype='f4').reshape(2, 3)
+ i, j = np.nested_iters(a, [[0], [1]],
+ flags=['buffered'],
+ op_flags=['readwrite'],
+ casting='same_kind',
+ op_dtypes='f8')
+ with i, j:
+ assert_equal(j[0].dtype, np.dtype('f8'))
+ for x in i:
+ for y in j:
+ y[...] += 1
+ assert_equal(a, [[1, 2, 3], [4, 5, 6]])
+
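+# A compact sketch (illustrative, not from the suite) of what nested_iters
+# does: it splits one iteration's axes between an outer and an inner
+# iterator, so [[0], [1]] on a 2-D array walks rows outside, columns inside.
+def _nested_iters_sketch():
+    a = np.arange(6).reshape(2, 3)
+    outer, inner = np.nested_iters(a, [[0], [1]])
+    return [[int(y) for y in inner] for _ in outer]  # [[0, 1, 2], [3, 4, 5]]
+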
+def test_iter_reduction_error():
+
+ a = np.arange(6)
+ assert_raises(ValueError, nditer, [a, None], [],
+ [['readonly'], ['readwrite', 'allocate']],
+ op_axes=[[0], [-1]])
+
+ a = np.arange(6).reshape(2, 3)
+ assert_raises(ValueError, nditer, [a, None], ['external_loop'],
+ [['readonly'], ['readwrite', 'allocate']],
+ op_axes=[[0, 1], [-1, -1]])
+
+def test_iter_reduction():
+ # Test doing reductions with the iterator
+
+ a = np.arange(6)
+ i = nditer([a, None], ['reduce_ok'],
+ [['readonly'], ['readwrite', 'allocate']],
+ op_axes=[[0], [-1]])
+ # Need to initialize the output operand to the addition unit
+ with i:
+ i.operands[1][...] = 0
+ # Do the reduction
+ for x, y in i:
+ y[...] += x
+ # Since no axes were specified, should have allocated a scalar
+ assert_equal(i.operands[1].ndim, 0)
+ assert_equal(i.operands[1], np.sum(a))
+
+ a = np.arange(6).reshape(2, 3)
+ i = nditer([a, None], ['reduce_ok', 'external_loop'],
+ [['readonly'], ['readwrite', 'allocate']],
+ op_axes=[[0, 1], [-1, -1]])
+ # Need to initialize the output operand to the addition unit
+ with i:
+ i.operands[1][...] = 0
+ # Reduction shape/strides for the output
+ assert_equal(i[1].shape, (6,))
+ assert_equal(i[1].strides, (0,))
+ # Do the reduction
+ for x, y in i:
+ # Use a for loop instead of ``y[...] += x``
+ # (equivalent to ``y[...] = y[...].copy() + x``),
+ # because y has the zero strides used for the reduction
+ for j in range(len(y)):
+ y[j] += x[j]
+ # Since no axes were specified, should have allocated a scalar
+ assert_equal(i.operands[1].ndim, 0)
+ assert_equal(i.operands[1], np.sum(a))
+
+ # This is a tricky reduction case for the buffering double loop
+ # to handle
+ a = np.ones((2, 3, 5))
+ it1 = nditer([a, None], ['reduce_ok', 'external_loop'],
+ [['readonly'], ['readwrite', 'allocate']],
+ op_axes=[None, [0, -1, 1]])
+ it2 = nditer([a, None], ['reduce_ok', 'external_loop',
+ 'buffered', 'delay_bufalloc'],
+ [['readonly'], ['readwrite', 'allocate']],
+ op_axes=[None, [0, -1, 1]], buffersize=10)
+ with it1, it2:
+ it1.operands[1].fill(0)
+ it2.operands[1].fill(0)
+ it2.reset()
+ for x in it1:
+ x[1][...] += x[0]
+ for x in it2:
+ x[1][...] += x[0]
+ assert_equal(it1.operands[1], it2.operands[1])
+ assert_equal(it2.operands[1].sum(), a.size)
+
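+# A minimal sketch (not itself one of the tests) of the reduction pattern
+# exercised above: request 'reduce_ok', map the output to no axes via
+# op_axes, initialize it to the additive identity, then accumulate.
+def _nditer_sum_sketch(arr):
+    it = nditer([arr, None], ['reduce_ok'],
+                [['readonly'], ['readwrite', 'allocate']],
+                op_axes=[list(range(arr.ndim)), [-1] * arr.ndim])
+    with it:
+        it.operands[1][...] = 0  # additive identity
+        for x, y in it:
+            y[...] += x
+        return it.operands[1][()]  # 0-d output, extracted as a scalar
+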
+def test_iter_buffering_reduction():
+ # Test doing buffered reductions with the iterator
+
+ a = np.arange(6)
+ b = np.array(0., dtype='f8').byteswap().newbyteorder()
+ i = nditer([a, b], ['reduce_ok', 'buffered'],
+ [['readonly'], ['readwrite', 'nbo']],
+ op_axes=[[0], [-1]])
+ with i:
+ assert_equal(i[1].dtype, np.dtype('f8'))
+ assert_(i[1].dtype != b.dtype)
+ # Do the reduction
+ for x, y in i:
+ y[...] += x
+ # Since no axes were specified, should have allocated a scalar
+ assert_equal(b, np.sum(a))
+
+ a = np.arange(6).reshape(2, 3)
+ b = np.array([0, 0], dtype='f8').byteswap().newbyteorder()
+ i = nditer([a, b], ['reduce_ok', 'external_loop', 'buffered'],
+ [['readonly'], ['readwrite', 'nbo']],
+ op_axes=[[0, 1], [0, -1]])
+ # Reduction shape/strides for the output
+ with i:
+ assert_equal(i[1].shape, (3,))
+ assert_equal(i[1].strides, (0,))
+ # Do the reduction
+ for x, y in i:
+ # Use a for loop instead of ``y[...] += x``
+ # (equivalent to ``y[...] = y[...].copy() + x``),
+ # because y has the zero strides used for the reduction
+ for j in range(len(y)):
+ y[j] += x[j]
+ assert_equal(b, np.sum(a, axis=1))
+
+ # Iterator inner double loop was wrong on this one
+ p = np.arange(2) + 1
+ it = np.nditer([p, None],
+ ['delay_bufalloc', 'reduce_ok', 'buffered', 'external_loop'],
+ [['readonly'], ['readwrite', 'allocate']],
+ op_axes=[[-1, 0], [-1, -1]],
+ itershape=(2, 2))
+ with it:
+ it.operands[1].fill(0)
+ it.reset()
+ assert_equal(it[0], [1, 2, 1, 2])
+
+ # Iterator inner loop should take argument contiguity into account
+ x = np.ones((7, 13, 8), np.int8)[4:6,1:11:6,1:5].transpose(1, 2, 0)
+ x[...] = np.arange(x.size).reshape(x.shape)
+ y_base = np.arange(4*4, dtype=np.int8).reshape(4, 4)
+ y_base_copy = y_base.copy()
+ y = y_base[::2,:,None]
+
+ it = np.nditer([y, x],
+ ['buffered', 'external_loop', 'reduce_ok'],
+ [['readwrite'], ['readonly']])
+ with it:
+ for a, b in it:
+ a.fill(2)
+
+ assert_equal(y_base[1::2], y_base_copy[1::2])
+ assert_equal(y_base[::2], 2)
+
+def test_iter_buffering_reduction_reuse_reduce_loops():
+ # There was a bug triggering reuse of the reduce loop inappropriately,
+ # which caused processing to happen in unnecessarily small chunks
+ # and overran the buffer.
+
+ a = np.zeros((2, 7))
+ b = np.zeros((1, 7))
+ it = np.nditer([a, b], flags=['reduce_ok', 'external_loop', 'buffered'],
+ op_flags=[['readonly'], ['readwrite']],
+ buffersize=5)
+
+ with it:
+ bufsizes = [x.shape[0] for x, y in it]
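+ # each 7-element row arrives as a 5-element chunk, then a 2-element chunk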
+ assert_equal(bufsizes, [5, 2, 5, 2])
+ assert_equal(sum(bufsizes), a.size)
+
+def test_iter_writemasked_badinput():
+ a = np.zeros((2, 3))
+ b = np.zeros((3,))
+ m = np.array([[True, True, False], [False, True, False]])
+ m2 = np.array([True, True, False])
+ m3 = np.array([0, 1, 1], dtype='u1')
+ mbad1 = np.array([0, 1, 1], dtype='i1')
+ mbad2 = np.array([0, 1, 1], dtype='f4')
+
+ # Need an 'arraymask' if any operand is 'writemasked'
+ assert_raises(ValueError, nditer, [a, m], [],
+ [['readwrite', 'writemasked'], ['readonly']])
+
+ # A 'writemasked' operand must not be readonly
+ assert_raises(ValueError, nditer, [a, m], [],
+ [['readonly', 'writemasked'], ['readonly', 'arraymask']])
+
+ # 'writemasked' and 'arraymask' may not be used together
+ assert_raises(ValueError, nditer, [a, m], [],
+ [['readonly'], ['readwrite', 'arraymask', 'writemasked']])
+
+ # 'arraymask' may only be specified once
+ assert_raises(ValueError, nditer, [a, m, m2], [],
+ [['readwrite', 'writemasked'],
+ ['readonly', 'arraymask'],
+ ['readonly', 'arraymask']])
+
+ # An 'arraymask' with nothing 'writemasked' also doesn't make sense
+ assert_raises(ValueError, nditer, [a, m], [],
+ [['readwrite'], ['readonly', 'arraymask']])
+
+ # A writemasked reduction requires a similarly smaller mask
+ assert_raises(ValueError, nditer, [a, b, m], ['reduce_ok'],
+ [['readonly'],
+ ['readwrite', 'writemasked'],
+ ['readonly', 'arraymask']])
+ # But this should work with a smaller/equal mask to the reduction operand
+ np.nditer([a, b, m2], ['reduce_ok'],
+ [['readonly'],
+ ['readwrite', 'writemasked'],
+ ['readonly', 'arraymask']])
+ # The arraymask itself cannot be a reduction
+ assert_raises(ValueError, nditer, [a, b, m2], ['reduce_ok'],
+ [['readonly'],
+ ['readwrite', 'writemasked'],
+ ['readwrite', 'arraymask']])
+
+ # A uint8 mask is ok too
+ np.nditer([a, m3], ['buffered'],
+ [['readwrite', 'writemasked'],
+ ['readonly', 'arraymask']],
+ op_dtypes=['f4', None],
+ casting='same_kind')
+ # An int8 mask isn't ok
+ assert_raises(TypeError, np.nditer, [a, mbad1], ['buffered'],
+ [['readwrite', 'writemasked'],
+ ['readonly', 'arraymask']],
+ op_dtypes=['f4', None],
+ casting='same_kind')
+ # A float32 mask isn't ok
+ assert_raises(TypeError, np.nditer, [a, mbad2], ['buffered'],
+ [['readwrite', 'writemasked'],
+ ['readonly', 'arraymask']],
+ op_dtypes=['f4', None],
+ casting='same_kind')
+
+def test_iter_writemasked():
+ a = np.zeros((3,), dtype='f8')
+ msk = np.array([True, True, False])
+
+ # When buffering is unused, 'writemasked' effectively does nothing.
+ # It's up to the user of the iterator to obey the requested semantics.
+ it = np.nditer([a, msk], [],
+ [['readwrite', 'writemasked'],
+ ['readonly', 'arraymask']])
+ with it:
+ for x, m in it:
+ x[...] = 1
+ # Because we violated the semantics, all the values became 1
+ assert_equal(a, [1, 1, 1])
+
+ # Even if buffering is enabled, we still may be accessing the array
+ # directly.
+ it = np.nditer([a, msk], ['buffered'],
+ [['readwrite', 'writemasked'],
+ ['readonly', 'arraymask']])
+ with it:
+ for x, m in it:
+ x[...] = 2.5
+ # Because we violated the semantics, all the values became 2.5
+ assert_equal(a, [2.5, 2.5, 2.5])
+
+ # If buffering will definitely happen, for instance because of
+ # a cast, only the items selected by the mask will be copied back from
+ # the buffer.
+ it = np.nditer([a, msk], ['buffered'],
+ [['readwrite', 'writemasked'],
+ ['readonly', 'arraymask']],
+ op_dtypes=['i8', None],
+ casting='unsafe')
+ with it:
+ for x, m in it:
+ x[...] = 3
+ # Even though we violated the semantics, only the selected values
+ # were copied back
+ assert_equal(a, [3, 3, 2.5])
+
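+# A sketch (illustrative only) of code that obeys the 'writemasked'
+# contract by writing only where the mask is True, so the result is the
+# same whether or not a buffering cast is in play.
+def _writemasked_sketch():
+    a = np.zeros(3, dtype='f8')
+    msk = np.array([True, True, False])
+    it = np.nditer([a, msk], [],
+                   [['readwrite', 'writemasked'],
+                    ['readonly', 'arraymask']])
+    with it:
+        for x, m in it:
+            if m:
+                x[...] = 1
+    return a  # array([1., 1., 0.])
+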
+def test_iter_non_writable_attribute_deletion():
+ it = np.nditer(np.ones(2))
+ attr = ["value", "shape", "operands", "itviews", "has_delayed_bufalloc",
+ "iterationneedsapi", "has_multi_index", "has_index", "dtypes",
+ "ndim", "nop", "itersize", "finished"]
+
+ for s in attr:
+ assert_raises(AttributeError, delattr, it, s)
+
+
+def test_iter_writable_attribute_deletion():
+ it = np.nditer(np.ones(2))
+ attr = [ "multi_index", "index", "iterrange", "iterindex"]
+ for s in attr:
+ assert_raises(AttributeError, delattr, it, s)
+
+
+def test_iter_element_deletion():
+ it = np.nditer(np.ones(3))
+ try:
+ del it[1]
+ del it[1:2]
+ except TypeError:
+ pass
+ except Exception:
+ raise AssertionError("deleting nditer items should raise TypeError")
+
+def test_iter_allocated_array_dtypes():
+ # If the dtype of an allocated output has a shape, the shape gets
+ # tacked onto the end of the result.
+ it = np.nditer(([1, 3, 20], None), op_dtypes=[None, ('i4', (2,))])
+ for a, b in it:
+ b[0] = a - 1
+ b[1] = a + 1
+ assert_equal(it.operands[1], [[0, 2], [2, 4], [19, 21]])
+
+ # Make sure this works for scalars too
+ it = np.nditer((10, 2, None), op_dtypes=[None, None, ('i4', (2, 2))])
+ for a, b, c in it:
+ c[0, 0] = a - b
+ c[0, 1] = a + b
+ c[1, 0] = a * b
+ c[1, 1] = a / b
+ assert_equal(it.operands[2], [[8, 12], [20, 5]])
+
+
+def test_0d_iter():
+ # Basic test for iteration of 0-d arrays:
+ i = nditer([2, 3], ['multi_index'], [['readonly']]*2)
+ assert_equal(i.ndim, 0)
+ assert_equal(next(i), (2, 3))
+ assert_equal(i.multi_index, ())
+ assert_equal(i.iterindex, 0)
+ assert_raises(StopIteration, next, i)
+ # test reset:
+ i.reset()
+ assert_equal(next(i), (2, 3))
+ assert_raises(StopIteration, next, i)
+
+ # test forcing to 0-d
+ i = nditer(np.arange(5), ['multi_index'], [['readonly']], op_axes=[()])
+ assert_equal(i.ndim, 0)
+ assert_equal(len(i), 1)
+ # note that itershape=() still behaves like None due to the conversions
+
+ # Test a more complex buffered casting case (same as another test above)
+ sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')]
+ a = np.array(0.5, dtype='f4')
+ i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+ casting='unsafe', op_dtypes=sdt)
+ vals = next(i)
+ assert_equal(vals['a'], 0.5)
+ assert_equal(vals['b'], 0)
+ assert_equal(vals['c'], [[(0.5)]*3]*2)
+ assert_equal(vals['d'], 0.5)
+
+
+def test_iter_too_large():
+ # The total size of the iterator must not exceed the maximum intp due
+ # to broadcasting. Dividing by 1024 will keep it small enough to
+ # give a legal array.
+ size = np.iinfo(np.intp).max // 1024
+ arr = np.lib.stride_tricks.as_strided(np.zeros(1), (size,), (0,))
+ assert_raises(ValueError, nditer, (arr, arr[:, None]))
+ # test the same with a multi-index; this may get more interesting once
+ # removing 0-dimensional axes is allowed (since the iterator can then grow)
+ assert_raises(ValueError, nditer,
+ (arr, arr[:, None]), flags=['multi_index'])
+
+
+def test_iter_too_large_with_multiindex():
+ # When a multi index is being tracked, the error is delayed. This
+ # checks the delayed error messages, and checks getting below the
+ # limit by removing an axis.
+ base_size = 2**10
+ num = 1
+ while base_size**num < np.iinfo(np.intp).max:
+ num += 1
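+ # on a 64-bit intp this stops at num == 7 (1024**7 == 2**70 > 2**63 - 1)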
+
+ shape_template = [1, 1] * num
+ arrays = []
+ for i in range(num):
+ shape = shape_template[:]
+ shape[i * 2] = 2**10
+ arrays.append(np.empty(shape))
+ arrays = tuple(arrays)
+
+ # arrays are now too large to be broadcast. The different modes test
+ # different nditer functionality with or without GIL.
+ for mode in range(6):
+ with assert_raises(ValueError):
+ _multiarray_tests.test_nditer_too_large(arrays, -1, mode)
+ # but if we do nothing with the nditer, it can be constructed:
+ _multiarray_tests.test_nditer_too_large(arrays, -1, 7)
+
+ # When an axis is removed, things should work again (half the time):
+ for i in range(num):
+ for mode in range(6):
+ # an axis with size 1024 is removed:
+ _multiarray_tests.test_nditer_too_large(arrays, i*2, mode)
+ # an axis with size 1 is removed:
+ with assert_raises(ValueError):
+ _multiarray_tests.test_nditer_too_large(arrays, i*2 + 1, mode)
+
+def test_writebacks():
+ a = np.arange(6, dtype='f4')
+ au = a.byteswap().newbyteorder()
+ assert_(a.dtype.byteorder != au.dtype.byteorder)
+ it = nditer(au, [], [['readwrite', 'updateifcopy']],
+ casting='equiv', op_dtypes=[np.dtype('f4')])
+ with it:
+ it.operands[0][:] = 100
+ assert_equal(au, 100)
+ # do it again, this time raising an error
+ it = nditer(au, [], [['readwrite', 'updateifcopy']],
+ casting='equiv', op_dtypes=[np.dtype('f4')])
+ try:
+ with it:
+ assert_equal(au.flags.writeable, False)
+ it.operands[0][:] = 0
+ raise ValueError('exit context manager on exception')
+ except ValueError:
+ pass
+ assert_equal(au, 0)
+ assert_equal(au.flags.writeable, True)
+ # cannot reuse i outside context manager
+ assert_raises(ValueError, getattr, it, 'operands')
+
+ it = nditer(au, [], [['readwrite', 'updateifcopy']],
+ casting='equiv', op_dtypes=[np.dtype('f4')])
+ with it:
+ x = it.operands[0]
+ x[:] = 6
+ assert_(x.flags.writebackifcopy)
+ assert_equal(au, 6)
+ assert_(not x.flags.writebackifcopy)
+ x[:] = 123 # x.data still valid
+ assert_equal(au, 6) # but not connected to au
+
+ it = nditer(au, [],
+ [['readwrite', 'updateifcopy']],
+ casting='equiv', op_dtypes=[np.dtype('f4')])
+ # reentering works
+ with it:
+ with it:
+ for x in it:
+ x[...] = 123
+
+ it = nditer(au, [],
+ [['readwrite', 'updateifcopy']],
+ casting='equiv', op_dtypes=[np.dtype('f4')])
+ # make sure exiting the inner context manager closes the iterator
+ with it:
+ with it:
+ for x in it:
+ x[...] = 123
+ assert_raises(ValueError, getattr, it, 'operands')
+ # do not crash if original data array is decrefed
+ it = nditer(au, [],
+ [['readwrite', 'updateifcopy']],
+ casting='equiv', op_dtypes=[np.dtype('f4')])
+ del au
+ with it:
+ for x in it:
+ x[...] = 123
+ # make sure we cannot reenter the closed iterator
+ enter = it.__enter__
+ assert_raises(RuntimeError, enter)
+
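+# A sketch of the 'updateifcopy' lifecycle exercised above (illustrative,
+# not one of the tests): a cast copy is made up front and written back only
+# when the iterator closes, so the original is untouched inside the context.
+def _writeback_sketch():
+    orig = np.arange(3, dtype='f4').byteswap().newbyteorder()
+    it = nditer(orig, [], [['readwrite', 'updateifcopy']],
+                casting='equiv', op_dtypes=[np.dtype('f4')])
+    with it:
+        it.operands[0][:] = 99  # writes the native-endian copy
+    return orig  # all 99 now that the context exit wrote back
+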
+def test_close_equivalent():
+ ''' using a context manager and using nditer.close are equivalent
+ '''
+ def add_close(x, y, out=None):
+ addop = np.add
+ it = np.nditer([x, y, out], [],
+ [['readonly'], ['readonly'], ['writeonly','allocate']])
+ for (a, b, c) in it:
+ addop(a, b, out=c)
+ ret = it.operands[2]
+ it.close()
+ return ret
+
+ def add_context(x, y, out=None):
+ addop = np.add
+ it = np.nditer([x, y, out], [],
+ [['readonly'], ['readonly'], ['writeonly','allocate']])
+ with it:
+ for (a, b, c) in it:
+ addop(a, b, out=c)
+ return it.operands[2]
+ z = add_close(range(5), range(5))
+ assert_equal(z, range(0, 10, 2))
+ z = add_context(range(5), range(5))
+ assert_equal(z, range(0, 10, 2))
+
+def test_close_raises():
+ it = np.nditer(np.arange(3))
+ assert_equal(next(it), 0)
+ it.close()
+ assert_raises(StopIteration, next, it)
+ assert_raises(ValueError, getattr, it, 'operands')
+
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+def test_warn_noclose():
+ a = np.arange(6, dtype='f4')
+ au = a.byteswap().newbyteorder()
+ with suppress_warnings() as sup:
+ sup.record(RuntimeWarning)
+ it = np.nditer(au, [], [['readwrite', 'updateifcopy']],
+ casting='equiv', op_dtypes=[np.dtype('f4')])
+ del it
+ assert len(sup.log) == 1
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_nditer.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_nditer.pyc
new file mode 100644
index 0000000..e623a89
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_nditer.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_numeric.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_numeric.py
new file mode 100644
index 0000000..3753472
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_numeric.py
@@ -0,0 +1,2750 @@
+from __future__ import division, absolute_import, print_function
+
+import sys
+import warnings
+import itertools
+import platform
+import pytest
+from decimal import Decimal
+
+import numpy as np
+from numpy.core import umath
+from numpy.random import rand, randint, randn
+from numpy.testing import (
+ assert_, assert_equal, assert_raises, assert_raises_regex,
+ assert_array_equal, assert_almost_equal, assert_array_almost_equal,
+ HAS_REFCOUNT
+ )
+
+
+class TestResize(object):
+ def test_copies(self):
+ A = np.array([[1, 2], [3, 4]])
+ Ar1 = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])
+ assert_equal(np.resize(A, (2, 4)), Ar1)
+
+ Ar2 = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
+ assert_equal(np.resize(A, (4, 2)), Ar2)
+
+ Ar3 = np.array([[1, 2, 3], [4, 1, 2], [3, 4, 1], [2, 3, 4]])
+ assert_equal(np.resize(A, (4, 3)), Ar3)
+
+ def test_zeroresize(self):
+ A = np.array([[1, 2], [3, 4]])
+ Ar = np.resize(A, (0,))
+ assert_array_equal(Ar, np.array([]))
+ assert_equal(A.dtype, Ar.dtype)
+
+ Ar = np.resize(A, (0, 2))
+ assert_equal(Ar.shape, (0, 2))
+
+ Ar = np.resize(A, (2, 0))
+ assert_equal(Ar.shape, (2, 0))
+
+ def test_reshape_from_zero(self):
+ # See also gh-6740
+ A = np.zeros(0, dtype=[('a', np.float32, 1)])
+ Ar = np.resize(A, (2, 1))
+ assert_array_equal(Ar, np.zeros((2, 1), Ar.dtype))
+ assert_equal(A.dtype, Ar.dtype)
+
+
+class TestNonarrayArgs(object):
+ # check that non-array arguments to functions wrap them in arrays
+ def test_choose(self):
+ choices = [[0, 1, 2],
+ [3, 4, 5],
+ [5, 6, 7]]
+ tgt = [5, 1, 5]
+ a = [2, 0, 1]
+
+ out = np.choose(a, choices)
+ assert_equal(out, tgt)
+
+ def test_clip(self):
+ arr = [-1, 5, 2, 3, 10, -4, -9]
+ out = np.clip(arr, 2, 7)
+ tgt = [2, 5, 2, 3, 7, 2, 2]
+ assert_equal(out, tgt)
+
+ def test_compress(self):
+ arr = [[0, 1, 2, 3, 4],
+ [5, 6, 7, 8, 9]]
+ tgt = [[5, 6, 7, 8, 9]]
+ out = np.compress([0, 1], arr, axis=0)
+ assert_equal(out, tgt)
+
+ def test_count_nonzero(self):
+ arr = [[0, 1, 7, 0, 0],
+ [3, 0, 0, 2, 19]]
+ tgt = np.array([2, 3])
+ out = np.count_nonzero(arr, axis=1)
+ assert_equal(out, tgt)
+
+ def test_cumproduct(self):
+ A = [[1, 2, 3], [4, 5, 6]]
+ assert_(np.all(np.cumproduct(A) == np.array([1, 2, 6, 24, 120, 720])))
+
+ def test_diagonal(self):
+ a = [[0, 1, 2, 3],
+ [4, 5, 6, 7],
+ [8, 9, 10, 11]]
+ out = np.diagonal(a)
+ tgt = [0, 5, 10]
+
+ assert_equal(out, tgt)
+
+ def test_mean(self):
+ A = [[1, 2, 3], [4, 5, 6]]
+ assert_(np.mean(A) == 3.5)
+ assert_(np.all(np.mean(A, 0) == np.array([2.5, 3.5, 4.5])))
+ assert_(np.all(np.mean(A, 1) == np.array([2., 5.])))
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', RuntimeWarning)
+ assert_(np.isnan(np.mean([])))
+ assert_(w[0].category is RuntimeWarning)
+
+ def test_ptp(self):
+ a = [3, 4, 5, 10, -3, -5, 6.0]
+ assert_equal(np.ptp(a, axis=0), 15.0)
+
+ def test_prod(self):
+ arr = [[1, 2, 3, 4],
+ [5, 6, 7, 9],
+ [10, 3, 4, 5]]
+ tgt = [24, 1890, 600]
+
+ assert_equal(np.prod(arr, axis=-1), tgt)
+
+ def test_ravel(self):
+ a = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
+ tgt = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
+ assert_equal(np.ravel(a), tgt)
+
+ def test_repeat(self):
+ a = [1, 2, 3]
+ tgt = [1, 1, 2, 2, 3, 3]
+
+ out = np.repeat(a, 2)
+ assert_equal(out, tgt)
+
+ def test_reshape(self):
+ arr = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
+ tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]
+ assert_equal(np.reshape(arr, (2, 6)), tgt)
+
+ def test_round(self):
+ arr = [1.56, 72.54, 6.35, 3.25]
+ tgt = [1.6, 72.5, 6.4, 3.2]
+ assert_equal(np.around(arr, decimals=1), tgt)
+
+ def test_searchsorted(self):
+ arr = [-8, -5, -1, 3, 6, 10]
+ out = np.searchsorted(arr, 0)
+ assert_equal(out, 3)
+
+ def test_size(self):
+ A = [[1, 2, 3], [4, 5, 6]]
+ assert_(np.size(A) == 6)
+ assert_(np.size(A, 0) == 2)
+ assert_(np.size(A, 1) == 3)
+
+ def test_squeeze(self):
+ A = [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]]
+ assert_(np.squeeze(A).shape == (3, 3))
+
+ def test_std(self):
+ A = [[1, 2, 3], [4, 5, 6]]
+ assert_almost_equal(np.std(A), 1.707825127659933)
+ assert_almost_equal(np.std(A, 0), np.array([1.5, 1.5, 1.5]))
+ assert_almost_equal(np.std(A, 1), np.array([0.81649658, 0.81649658]))
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', RuntimeWarning)
+ assert_(np.isnan(np.std([])))
+ assert_(w[0].category is RuntimeWarning)
+
+ def test_swapaxes(self):
+ tgt = [[[0, 4], [2, 6]], [[1, 5], [3, 7]]]
+ a = [[[0, 1], [2, 3]], [[4, 5], [6, 7]]]
+ out = np.swapaxes(a, 0, 2)
+ assert_equal(out, tgt)
+
+ def test_sum(self):
+ m = [[1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9]]
+ tgt = [[6], [15], [24]]
+ out = np.sum(m, axis=1, keepdims=True)
+
+ assert_equal(tgt, out)
+
+ def test_take(self):
+ tgt = [2, 3, 5]
+ indices = [1, 2, 4]
+ a = [1, 2, 3, 4, 5]
+
+ out = np.take(a, indices)
+ assert_equal(out, tgt)
+
+ def test_trace(self):
+ c = [[1, 2], [3, 4], [5, 6]]
+ assert_equal(np.trace(c), 5)
+
+ def test_transpose(self):
+ arr = [[1, 2], [3, 4], [5, 6]]
+ tgt = [[1, 3, 5], [2, 4, 6]]
+ assert_equal(np.transpose(arr, (1, 0)), tgt)
+
+ def test_var(self):
+ A = [[1, 2, 3], [4, 5, 6]]
+ assert_almost_equal(np.var(A), 2.9166666666666665)
+ assert_almost_equal(np.var(A, 0), np.array([2.25, 2.25, 2.25]))
+ assert_almost_equal(np.var(A, 1), np.array([0.66666667, 0.66666667]))
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', RuntimeWarning)
+ assert_(np.isnan(np.var([])))
+ assert_(w[0].category is RuntimeWarning)
+
+
+class TestIsscalar(object):
+ def test_isscalar(self):
+ assert_(np.isscalar(3.1))
+ assert_(np.isscalar(np.int16(12345)))
+ assert_(np.isscalar(False))
+ assert_(np.isscalar('numpy'))
+ assert_(not np.isscalar([3.1]))
+ assert_(not np.isscalar(None))
+
+ # PEP 3141
+ from fractions import Fraction
+ assert_(np.isscalar(Fraction(5, 17)))
+ from numbers import Number
+ assert_(np.isscalar(Number()))
+
+
+class TestBoolScalar(object):
+ def test_logical(self):
+ f = np.False_
+ t = np.True_
+ s = "xyz"
+ assert_((t and s) is s)
+ assert_((f and s) is f)
+
+ def test_bitwise_or(self):
+ f = np.False_
+ t = np.True_
+ assert_((t | t) is t)
+ assert_((f | t) is t)
+ assert_((t | f) is t)
+ assert_((f | f) is f)
+
+ def test_bitwise_and(self):
+ f = np.False_
+ t = np.True_
+ assert_((t & t) is t)
+ assert_((f & t) is f)
+ assert_((t & f) is f)
+ assert_((f & f) is f)
+
+ def test_bitwise_xor(self):
+ f = np.False_
+ t = np.True_
+ assert_((t ^ t) is f)
+ assert_((f ^ t) is t)
+ assert_((t ^ f) is t)
+ assert_((f ^ f) is f)
+
+
+class TestBoolArray(object):
+ def setup(self):
+ # offset for simd tests
+ self.t = np.array([True] * 41, dtype=bool)[1::]
+ self.f = np.array([False] * 41, dtype=bool)[1::]
+ self.o = np.array([False] * 42, dtype=bool)[2::]
+ self.nm = self.f.copy()
+ self.im = self.t.copy()
+ self.nm[3] = True
+ self.nm[-2] = True
+ self.im[3] = False
+ self.im[-2] = False
+
+ def test_all_any(self):
+ assert_(self.t.all())
+ assert_(self.t.any())
+ assert_(not self.f.all())
+ assert_(not self.f.any())
+ assert_(self.nm.any())
+ assert_(self.im.any())
+ assert_(not self.nm.all())
+ assert_(not self.im.all())
+ # check bad element in all positions
+ for i in range(256 - 7):
+ d = np.array([False] * 256, dtype=bool)[7::]
+ d[i] = True
+ assert_(np.any(d))
+ e = np.array([True] * 256, dtype=bool)[7::]
+ e[i] = False
+ assert_(not np.all(e))
+ assert_array_equal(e, ~d)
+ # big array test for blocked libc loops
+ for i in list(range(9, 6000, 507)) + [7764, 90021, -10]:
+ d = np.array([False] * 100043, dtype=bool)
+ d[i] = True
+ assert_(np.any(d), msg="%r" % i)
+ e = np.array([True] * 100043, dtype=bool)
+ e[i] = False
+ assert_(not np.all(e), msg="%r" % i)
+
+ def test_logical_not_abs(self):
+ assert_array_equal(~self.t, self.f)
+ assert_array_equal(np.abs(~self.t), self.f)
+ assert_array_equal(np.abs(~self.f), self.t)
+ assert_array_equal(np.abs(self.f), self.f)
+ assert_array_equal(~np.abs(self.f), self.t)
+ assert_array_equal(~np.abs(self.t), self.f)
+ assert_array_equal(np.abs(~self.nm), self.im)
+ np.logical_not(self.t, out=self.o)
+ assert_array_equal(self.o, self.f)
+ np.abs(self.t, out=self.o)
+ assert_array_equal(self.o, self.t)
+
+ def test_logical_and_or_xor(self):
+ assert_array_equal(self.t | self.t, self.t)
+ assert_array_equal(self.f | self.f, self.f)
+ assert_array_equal(self.t | self.f, self.t)
+ assert_array_equal(self.f | self.t, self.t)
+ np.logical_or(self.t, self.t, out=self.o)
+ assert_array_equal(self.o, self.t)
+ assert_array_equal(self.t & self.t, self.t)
+ assert_array_equal(self.f & self.f, self.f)
+ assert_array_equal(self.t & self.f, self.f)
+ assert_array_equal(self.f & self.t, self.f)
+ np.logical_and(self.t, self.t, out=self.o)
+ assert_array_equal(self.o, self.t)
+ assert_array_equal(self.t ^ self.t, self.f)
+ assert_array_equal(self.f ^ self.f, self.f)
+ assert_array_equal(self.t ^ self.f, self.t)
+ assert_array_equal(self.f ^ self.t, self.t)
+ np.logical_xor(self.t, self.t, out=self.o)
+ assert_array_equal(self.o, self.f)
+
+ assert_array_equal(self.nm & self.t, self.nm)
+ assert_array_equal(self.im & self.f, False)
+ assert_array_equal(self.nm & True, self.nm)
+ assert_array_equal(self.im & False, self.f)
+ assert_array_equal(self.nm | self.t, self.t)
+ assert_array_equal(self.im | self.f, self.im)
+ assert_array_equal(self.nm | True, self.t)
+ assert_array_equal(self.im | False, self.im)
+ assert_array_equal(self.nm ^ self.t, self.im)
+ assert_array_equal(self.im ^ self.f, self.im)
+ assert_array_equal(self.nm ^ True, self.im)
+ assert_array_equal(self.im ^ False, self.im)
+
+
+class TestBoolCmp(object):
+ def setup(self):
+ self.f = np.ones(256, dtype=np.float32)
+ self.ef = np.ones(self.f.size, dtype=bool)
+ self.d = np.ones(128, dtype=np.float64)
+ self.ed = np.ones(self.d.size, dtype=bool)
+ # generate values for all permutations of 256-bit SIMD vectors
+ s = 0
+ for i in range(32):
+ self.f[s:s+8] = [i & 2**x for x in range(8)]
+ self.ef[s:s+8] = [(i & 2**x) != 0 for x in range(8)]
+ s += 8
+ s = 0
+ for i in range(16):
+ self.d[s:s+4] = [i & 2**x for x in range(4)]
+ self.ed[s:s+4] = [(i & 2**x) != 0 for x in range(4)]
+ s += 4
+
+ self.nf = self.f.copy()
+ self.nd = self.d.copy()
+ self.nf[self.ef] = np.nan
+ self.nd[self.ed] = np.nan
+
+ self.inff = self.f.copy()
+ self.infd = self.d.copy()
+ self.inff[::3][self.ef[::3]] = np.inf
+ self.infd[::3][self.ed[::3]] = np.inf
+ self.inff[1::3][self.ef[1::3]] = -np.inf
+ self.infd[1::3][self.ed[1::3]] = -np.inf
+ self.inff[2::3][self.ef[2::3]] = np.nan
+ self.infd[2::3][self.ed[2::3]] = np.nan
+ self.efnonan = self.ef.copy()
+ self.efnonan[2::3] = False
+ self.ednonan = self.ed.copy()
+ self.ednonan[2::3] = False
+
+ self.signf = self.f.copy()
+ self.signd = self.d.copy()
+ self.signf[self.ef] *= -1.
+ self.signd[self.ed] *= -1.
+ self.signf[1::6][self.ef[1::6]] = -np.inf
+ self.signd[1::6][self.ed[1::6]] = -np.inf
+ self.signf[3::6][self.ef[3::6]] = -np.nan
+ self.signd[3::6][self.ed[3::6]] = -np.nan
+ self.signf[4::6][self.ef[4::6]] = -0.
+ self.signd[4::6][self.ed[4::6]] = -0.
+
+ def test_float(self):
+ # offset for alignment test
+ for i in range(4):
+ assert_array_equal(self.f[i:] > 0, self.ef[i:])
+ assert_array_equal(self.f[i:] - 1 >= 0, self.ef[i:])
+ assert_array_equal(self.f[i:] == 0, ~self.ef[i:])
+ assert_array_equal(-self.f[i:] < 0, self.ef[i:])
+ assert_array_equal(-self.f[i:] + 1 <= 0, self.ef[i:])
+ r = self.f[i:] != 0
+ assert_array_equal(r, self.ef[i:])
+ r2 = self.f[i:] != np.zeros_like(self.f[i:])
+ r3 = 0 != self.f[i:]
+ assert_array_equal(r, r2)
+ assert_array_equal(r, r3)
+ # check bool == 0x1
+ assert_array_equal(r.view(np.int8), r.astype(np.int8))
+ assert_array_equal(r2.view(np.int8), r2.astype(np.int8))
+ assert_array_equal(r3.view(np.int8), r3.astype(np.int8))
+
+ # isnan on amd64 takes the same code path
+ assert_array_equal(np.isnan(self.nf[i:]), self.ef[i:])
+ assert_array_equal(np.isfinite(self.nf[i:]), ~self.ef[i:])
+ assert_array_equal(np.isfinite(self.inff[i:]), ~self.ef[i:])
+ assert_array_equal(np.isinf(self.inff[i:]), self.efnonan[i:])
+ assert_array_equal(np.signbit(self.signf[i:]), self.ef[i:])
+
+ def test_double(self):
+ # offset for alignment test
+ for i in range(2):
+ assert_array_equal(self.d[i:] > 0, self.ed[i:])
+ assert_array_equal(self.d[i:] - 1 >= 0, self.ed[i:])
+ assert_array_equal(self.d[i:] == 0, ~self.ed[i:])
+ assert_array_equal(-self.d[i:] < 0, self.ed[i:])
+ assert_array_equal(-self.d[i:] + 1 <= 0, self.ed[i:])
+ r = self.d[i:] != 0
+ assert_array_equal(r, self.ed[i:])
+ r2 = self.d[i:] != np.zeros_like(self.d[i:])
+ r3 = 0 != self.d[i:]
+ assert_array_equal(r, r2)
+ assert_array_equal(r, r3)
+ # check bool == 0x1
+ assert_array_equal(r.view(np.int8), r.astype(np.int8))
+ assert_array_equal(r2.view(np.int8), r2.astype(np.int8))
+ assert_array_equal(r3.view(np.int8), r3.astype(np.int8))
+
+ # isnan on amd64 takes the same code path
+ assert_array_equal(np.isnan(self.nd[i:]), self.ed[i:])
+ assert_array_equal(np.isfinite(self.nd[i:]), ~self.ed[i:])
+ assert_array_equal(np.isfinite(self.infd[i:]), ~self.ed[i:])
+ assert_array_equal(np.isinf(self.infd[i:]), self.ednonan[i:])
+ assert_array_equal(np.signbit(self.signd[i:]), self.ed[i:])
+
+
+class TestSeterr(object):
+ def test_default(self):
+ err = np.geterr()
+ assert_equal(err,
+ dict(divide='warn',
+ invalid='warn',
+ over='warn',
+ under='ignore')
+ )
+
+ def test_set(self):
+ with np.errstate():
+ err = np.seterr()
+ old = np.seterr(divide='print')
+ assert_(err == old)
+ new = np.seterr()
+ assert_(new['divide'] == 'print')
+ np.seterr(over='raise')
+ assert_(np.geterr()['over'] == 'raise')
+ assert_(new['divide'] == 'print')
+ np.seterr(**old)
+ assert_(np.geterr() == old)
+
+ @pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.")
+ def test_divide_err(self):
+ with np.errstate(divide='raise'):
+ with assert_raises(FloatingPointError):
+ np.array([1.]) / np.array([0.])
+
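+ # np.errstate restores the previous settings on exit, so this seterr
+ # is effectively scoped to the surrounding with-block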
+ np.seterr(divide='ignore')
+ np.array([1.]) / np.array([0.])
+
+ def test_errobj(self):
+ olderrobj = np.geterrobj()
+ self.called = 0
+ try:
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter("always")
+ with np.errstate(divide='warn'):
+ np.seterrobj([20000, 1, None])
+ np.array([1.]) / np.array([0.])
+ assert_equal(len(w), 1)
+
+ def log_err(*args):
+ self.called += 1
+ extobj_err = args
+ assert_(len(extobj_err) == 2)
+ assert_("divide" in extobj_err[0])
+
+ with np.errstate(divide='ignore'):
+ np.seterrobj([20000, 3, log_err])
+ np.array([1.]) / np.array([0.])
+ assert_equal(self.called, 1)
+
+ np.seterrobj(olderrobj)
+ with np.errstate(divide='ignore'):
+ np.divide(1., 0., extobj=[20000, 3, log_err])
+ assert_equal(self.called, 2)
+ finally:
+ np.seterrobj(olderrobj)
+ del self.called
+
+ def test_errobj_noerrmask(self):
+ # errmask = 0 has a special code path for the default
+ olderrobj = np.geterrobj()
+ try:
+ # set errobj to something non default
+ np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT,
+ umath.ERR_DEFAULT + 1, None])
+ # call a ufunc
+ np.isnan(np.array([6]))
+ # same with the default, lots of times to get rid of possible
+ # pre-existing stack in the code
+ for i in range(10000):
+ np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT, umath.ERR_DEFAULT,
+ None])
+ np.isnan(np.array([6]))
+ finally:
+ np.seterrobj(olderrobj)
+
+
+class TestFloatExceptions(object):
+ def assert_raises_fpe(self, fpeerr, flop, x, y):
+ ftype = type(x)
+ try:
+ flop(x, y)
+ assert_(False,
+ "Type %s did not raise fpe error '%s'." % (ftype, fpeerr))
+ except FloatingPointError as exc:
+ assert_(str(exc).find(fpeerr) >= 0,
+ "Type %s raised wrong fpe error '%s'." % (ftype, exc))
+
+ def assert_op_raises_fpe(self, fpeerr, flop, sc1, sc2):
+ # Check that fpe exception is raised.
+ #
+ # Given a floating operation `flop` and two scalar values, check that
+ # the operation raises the floating point exception specified by
+ # `fpeerr`. Tests all variants with 0-d array scalars as well.
+
+ self.assert_raises_fpe(fpeerr, flop, sc1, sc2)
+ self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2)
+ self.assert_raises_fpe(fpeerr, flop, sc1, sc2[()])
+ self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2[()])
+
+ def test_floating_exceptions(self):
+ # Test basic arithmetic function errors
+ with np.errstate(all='raise'):
+ # Test for all real and complex float types
+ for typecode in np.typecodes['AllFloat']:
+ ftype = np.obj2sctype(typecode)
+ if np.dtype(ftype).kind == 'f':
+ # Get some extreme values for the type
+ fi = np.finfo(ftype)
+ ft_tiny = fi.tiny
+ ft_max = fi.max
+ ft_eps = fi.eps
+ underflow = 'underflow'
+ divbyzero = 'divide by zero'
+ else:
+ # 'c', complex, corresponding real dtype
+ rtype = type(ftype(0).real)
+ fi = np.finfo(rtype)
+ ft_tiny = ftype(fi.tiny)
+ ft_max = ftype(fi.max)
+ ft_eps = ftype(fi.eps)
+ # The complex types raise different exceptions
+ underflow = ''
+ divbyzero = ''
+ overflow = 'overflow'
+ invalid = 'invalid'
+
+ self.assert_raises_fpe(underflow,
+ lambda a, b: a/b, ft_tiny, ft_max)
+ self.assert_raises_fpe(underflow,
+ lambda a, b: a*b, ft_tiny, ft_tiny)
+ self.assert_raises_fpe(overflow,
+ lambda a, b: a*b, ft_max, ftype(2))
+ self.assert_raises_fpe(overflow,
+ lambda a, b: a/b, ft_max, ftype(0.5))
+ self.assert_raises_fpe(overflow,
+ lambda a, b: a+b, ft_max, ft_max*ft_eps)
+ self.assert_raises_fpe(overflow,
+ lambda a, b: a-b, -ft_max, ft_max*ft_eps)
+ self.assert_raises_fpe(overflow,
+ np.power, ftype(2), ftype(2**fi.nexp))
+ self.assert_raises_fpe(divbyzero,
+ lambda a, b: a/b, ftype(1), ftype(0))
+ self.assert_raises_fpe(invalid,
+ lambda a, b: a/b, ftype(np.inf), ftype(np.inf))
+ self.assert_raises_fpe(invalid,
+ lambda a, b: a/b, ftype(0), ftype(0))
+ self.assert_raises_fpe(invalid,
+ lambda a, b: a-b, ftype(np.inf), ftype(np.inf))
+ self.assert_raises_fpe(invalid,
+ lambda a, b: a+b, ftype(np.inf), ftype(-np.inf))
+ self.assert_raises_fpe(invalid,
+ lambda a, b: a*b, ftype(0), ftype(np.inf))
+
+ def test_warnings(self):
+ # test warning code path
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter("always")
+ with np.errstate(all="warn"):
+ np.divide(1, 0.)
+ assert_equal(len(w), 1)
+ assert_("divide by zero" in str(w[0].message))
+ np.array(1e300) * np.array(1e300)
+ assert_equal(len(w), 2)
+ assert_("overflow" in str(w[-1].message))
+ np.array(np.inf) - np.array(np.inf)
+ assert_equal(len(w), 3)
+ assert_("invalid value" in str(w[-1].message))
+ np.array(1e-300) * np.array(1e-300)
+ assert_equal(len(w), 4)
+ assert_("underflow" in str(w[-1].message))
+
+
+class TestTypes(object):
+ def check_promotion_cases(self, promote_func):
+ # tests that the scalars get coerced correctly.
+ b = np.bool_(0)
+ i8, i16, i32, i64 = np.int8(0), np.int16(0), np.int32(0), np.int64(0)
+ u8, u16, u32, u64 = np.uint8(0), np.uint16(0), np.uint32(0), np.uint64(0)
+ f32, f64, fld = np.float32(0), np.float64(0), np.longdouble(0)
+ c64, c128, cld = np.complex64(0), np.complex128(0), np.clongdouble(0)
+
+ # coercion within the same kind
+ assert_equal(promote_func(i8, i16), np.dtype(np.int16))
+ assert_equal(promote_func(i32, i8), np.dtype(np.int32))
+ assert_equal(promote_func(i16, i64), np.dtype(np.int64))
+ assert_equal(promote_func(u8, u32), np.dtype(np.uint32))
+ assert_equal(promote_func(f32, f64), np.dtype(np.float64))
+ assert_equal(promote_func(fld, f32), np.dtype(np.longdouble))
+ assert_equal(promote_func(f64, fld), np.dtype(np.longdouble))
+ assert_equal(promote_func(c128, c64), np.dtype(np.complex128))
+ assert_equal(promote_func(cld, c128), np.dtype(np.clongdouble))
+ assert_equal(promote_func(c64, fld), np.dtype(np.clongdouble))
+
+ # coercion between kinds
+ assert_equal(promote_func(b, i32), np.dtype(np.int32))
+ assert_equal(promote_func(b, u8), np.dtype(np.uint8))
+ assert_equal(promote_func(i8, u8), np.dtype(np.int16))
+ assert_equal(promote_func(u8, i32), np.dtype(np.int32))
+ assert_equal(promote_func(i64, u32), np.dtype(np.int64))
+ assert_equal(promote_func(u64, i32), np.dtype(np.float64))
+ assert_equal(promote_func(i32, f32), np.dtype(np.float64))
+ assert_equal(promote_func(i64, f32), np.dtype(np.float64))
+ assert_equal(promote_func(f32, i16), np.dtype(np.float32))
+ assert_equal(promote_func(f32, u32), np.dtype(np.float64))
+ assert_equal(promote_func(f32, c64), np.dtype(np.complex64))
+ assert_equal(promote_func(c128, f32), np.dtype(np.complex128))
+ assert_equal(promote_func(cld, f64), np.dtype(np.clongdouble))
+
+ # coercion between scalars and 1-D arrays
+ assert_equal(promote_func(np.array([b]), i8), np.dtype(np.int8))
+ assert_equal(promote_func(np.array([b]), u8), np.dtype(np.uint8))
+ assert_equal(promote_func(np.array([b]), i32), np.dtype(np.int32))
+ assert_equal(promote_func(np.array([b]), u32), np.dtype(np.uint32))
+ assert_equal(promote_func(np.array([i8]), i64), np.dtype(np.int8))
+ assert_equal(promote_func(u64, np.array([i32])), np.dtype(np.int32))
+ assert_equal(promote_func(i64, np.array([u32])), np.dtype(np.uint32))
+ assert_equal(promote_func(np.int32(-1), np.array([u64])),
+ np.dtype(np.float64))
+ assert_equal(promote_func(f64, np.array([f32])), np.dtype(np.float32))
+ assert_equal(promote_func(fld, np.array([f32])), np.dtype(np.float32))
+ assert_equal(promote_func(np.array([f64]), fld), np.dtype(np.float64))
+ assert_equal(promote_func(fld, np.array([c64])),
+ np.dtype(np.complex64))
+ assert_equal(promote_func(c64, np.array([f64])),
+ np.dtype(np.complex128))
+ assert_equal(promote_func(np.complex64(3j), np.array([f64])),
+ np.dtype(np.complex128))
+
+ # coercion between scalars and 1-D arrays, where
+ # the scalar has greater kind than the array
+ assert_equal(promote_func(np.array([b]), f64), np.dtype(np.float64))
+ assert_equal(promote_func(np.array([b]), i64), np.dtype(np.int64))
+ assert_equal(promote_func(np.array([b]), u64), np.dtype(np.uint64))
+ assert_equal(promote_func(np.array([i8]), f64), np.dtype(np.float64))
+ assert_equal(promote_func(np.array([u16]), f64), np.dtype(np.float64))
+
+ # uint and int are treated as the same "kind" for
+ # the purposes of array-scalar promotion.
+ assert_equal(promote_func(np.array([u16]), i32), np.dtype(np.uint16))
+
+ # float and complex are treated as the same "kind" for
+ # the purposes of array-scalar promotion, so that you can do
+ # (0j + float32array) to get a complex64 array instead of
+ # a complex128 array.
+ assert_equal(promote_func(np.array([f32]), c128),
+ np.dtype(np.complex64))
+
+ def test_coercion(self):
+ def res_type(a, b):
+ return np.add(a, b).dtype
+
+ self.check_promotion_cases(res_type)
+
+ # Use-case: float/complex scalar * bool/int8 array
+ # shouldn't narrow the float/complex type
+ for a in [np.array([True, False]), np.array([-3, 12], dtype=np.int8)]:
+ b = 1.234 * a
+ assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype)
+ b = np.longdouble(1.234) * a
+ assert_equal(b.dtype, np.dtype(np.longdouble),
+ "array type %s" % a.dtype)
+ b = np.float64(1.234) * a
+ assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype)
+ b = np.float32(1.234) * a
+ assert_equal(b.dtype, np.dtype('f4'), "array type %s" % a.dtype)
+ b = np.float16(1.234) * a
+ assert_equal(b.dtype, np.dtype('f2'), "array type %s" % a.dtype)
+
+ b = 1.234j * a
+ assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype)
+ b = np.clongdouble(1.234j) * a
+ assert_equal(b.dtype, np.dtype(np.clongdouble),
+ "array type %s" % a.dtype)
+ b = np.complex128(1.234j) * a
+ assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype)
+ b = np.complex64(1.234j) * a
+ assert_equal(b.dtype, np.dtype('c8'), "array type %s" % a.dtype)
+
+ # The following use-case is problematic, and to resolve its
+ # tricky side-effects requires more changes.
+ #
+ # Use-case: (1-t)*a, where 't' is a boolean array and 'a' is
+ # a float32, shouldn't promote to float64
+ #
+ # a = np.array([1.0, 1.5], dtype=np.float32)
+ # t = np.array([True, False])
+ # b = t*a
+ # assert_equal(b, [1.0, 0.0])
+ # assert_equal(b.dtype, np.dtype('f4'))
+ # b = (1-t)*a
+ # assert_equal(b, [0.0, 1.5])
+ # assert_equal(b.dtype, np.dtype('f4'))
+ #
+ # Probably ~t (bitwise negation) is more proper to use here,
+ # but this is arguably less intuitive to understand at a glance, and
+ # would fail if 't' is actually an integer array instead of boolean:
+ #
+ # b = (~t)*a
+ # assert_equal(b, [0.0, 1.5])
+ # assert_equal(b.dtype, np.dtype('f4'))
+
+ def test_result_type(self):
+ self.check_promotion_cases(np.result_type)
+ assert_(np.result_type(None) == np.dtype(None))
+
+ def test_promote_types_endian(self):
+ # promote_types should always return native-endian types
+ assert_equal(np.promote_types('<i8', '>i8'), np.dtype('i8'))
+
+ assert_equal(np.promote_types('>i8', '>U16'), np.dtype('U21'))
+ assert_equal(np.promote_types('<U16', '>i8'), np.dtype('U21'))
+ assert_equal(np.promote_types('<S5', '>U8'), np.dtype('U8'))
+ assert_equal(np.promote_types('<U8', '>S5'), np.dtype('U8'))
+ assert_equal(np.promote_types('<U8', '>U5'), np.dtype('U8'))
+
+ assert_equal(np.promote_types('<M8', '>M8'), np.dtype('M8'))
+ assert_equal(np.promote_types('<m8', '>m8'), np.dtype('m8'))
+
+ def test_promote_types_strings(self):
+ assert_equal(np.promote_types('bool', 'S'), np.dtype('S5'))
+ assert_equal(np.promote_types('b', 'S'), np.dtype('S4'))
+ assert_equal(np.promote_types('u1', 'S'), np.dtype('S3'))
+ assert_equal(np.promote_types('u2', 'S'), np.dtype('S5'))
+ assert_equal(np.promote_types('u4', 'S'), np.dtype('S10'))
+ assert_equal(np.promote_types('u8', 'S'), np.dtype('S20'))
+ assert_equal(np.promote_types('i1', 'S'), np.dtype('S4'))
+ assert_equal(np.promote_types('i2', 'S'), np.dtype('S6'))
+ assert_equal(np.promote_types('i4', 'S'), np.dtype('S11'))
+ assert_equal(np.promote_types('i8', 'S'), np.dtype('S21'))
+ assert_equal(np.promote_types('bool', 'U'), np.dtype('U5'))
+ assert_equal(np.promote_types('b', 'U'), np.dtype('U4'))
+ assert_equal(np.promote_types('u1', 'U'), np.dtype('U3'))
+ assert_equal(np.promote_types('u2', 'U'), np.dtype('U5'))
+ assert_equal(np.promote_types('u4', 'U'), np.dtype('U10'))
+ assert_equal(np.promote_types('u8', 'U'), np.dtype('U20'))
+ assert_equal(np.promote_types('i1', 'U'), np.dtype('U4'))
+ assert_equal(np.promote_types('i2', 'U'), np.dtype('U6'))
+ assert_equal(np.promote_types('i4', 'U'), np.dtype('U11'))
+ assert_equal(np.promote_types('i8', 'U'), np.dtype('U21'))
+ assert_equal(np.promote_types('bool', 'S1'), np.dtype('S5'))
+ assert_equal(np.promote_types('bool', 'S30'), np.dtype('S30'))
+ assert_equal(np.promote_types('b', 'S1'), np.dtype('S4'))
+ assert_equal(np.promote_types('b', 'S30'), np.dtype('S30'))
+ assert_equal(np.promote_types('u1', 'S1'), np.dtype('S3'))
+ assert_equal(np.promote_types('u1', 'S30'), np.dtype('S30'))
+ assert_equal(np.promote_types('u2', 'S1'), np.dtype('S5'))
+ assert_equal(np.promote_types('u2', 'S30'), np.dtype('S30'))
+ assert_equal(np.promote_types('u4', 'S1'), np.dtype('S10'))
+ assert_equal(np.promote_types('u4', 'S30'), np.dtype('S30'))
+ assert_equal(np.promote_types('u8', 'S1'), np.dtype('S20'))
+ assert_equal(np.promote_types('u8', 'S30'), np.dtype('S30'))
+
+ def test_can_cast(self):
+ assert_(np.can_cast(np.int32, np.int64))
+ assert_(np.can_cast(np.float64, complex))
+ assert_(not np.can_cast(complex, float))
+
+ assert_(np.can_cast('i8', 'f8'))
+ assert_(not np.can_cast('i8', 'f4'))
+ assert_(np.can_cast('i4', 'S11'))
+
+ assert_(np.can_cast('i8', 'i8', 'no'))
+ assert_(not np.can_cast('<i8', '>i8', 'no'))
+
+ assert_(np.can_cast('<i8', '>i8', 'equiv'))
+ assert_(not np.can_cast('<i4', '>i8', 'equiv'))
+
+ assert_(np.can_cast('<i4', '>i8', 'safe'))
+ assert_(not np.can_cast('<i8', '>i4', 'safe'))
+
+ assert_(np.can_cast('<i8', '>i4', 'same_kind'))
+ assert_(not np.can_cast('<i8', '>u4', 'same_kind'))
+
+ assert_(np.can_cast('<i8', '>u4', 'unsafe'))
+
+ assert_(np.can_cast('bool', 'S5'))
+ assert_(not np.can_cast('bool', 'S4'))
+
+ assert_(np.can_cast('b', 'S4'))
+ assert_(not np.can_cast('b', 'S3'))
+
+ assert_(np.can_cast('u1', 'S3'))
+ assert_(not np.can_cast('u1', 'S2'))
+ assert_(np.can_cast('u2', 'S5'))
+ assert_(not np.can_cast('u2', 'S4'))
+ assert_(np.can_cast('u4', 'S10'))
+ assert_(not np.can_cast('u4', 'S9'))
+ assert_(np.can_cast('u8', 'S20'))
+ assert_(not np.can_cast('u8', 'S19'))
+
+ assert_(np.can_cast('i1', 'S4'))
+ assert_(not np.can_cast('i1', 'S3'))
+ assert_(np.can_cast('i2', 'S6'))
+ assert_(not np.can_cast('i2', 'S5'))
+ assert_(np.can_cast('i4', 'S11'))
+ assert_(not np.can_cast('i4', 'S10'))
+ assert_(np.can_cast('i8', 'S21'))
+ assert_(not np.can_cast('i8', 'S20'))
+
+ assert_(np.can_cast('bool', 'S5'))
+ assert_(not np.can_cast('bool', 'S4'))
+
+ assert_(np.can_cast('b', 'U4'))
+ assert_(not np.can_cast('b', 'U3'))
+
+ assert_(np.can_cast('u1', 'U3'))
+ assert_(not np.can_cast('u1', 'U2'))
+ assert_(np.can_cast('u2', 'U5'))
+ assert_(not np.can_cast('u2', 'U4'))
+ assert_(np.can_cast('u4', 'U10'))
+ assert_(not np.can_cast('u4', 'U9'))
+ assert_(np.can_cast('u8', 'U20'))
+ assert_(not np.can_cast('u8', 'U19'))
+
+ assert_(np.can_cast('i1', 'U4'))
+ assert_(not np.can_cast('i1', 'U3'))
+ assert_(np.can_cast('i2', 'U6'))
+ assert_(not np.can_cast('i2', 'U5'))
+ assert_(np.can_cast('i4', 'U11'))
+ assert_(not np.can_cast('i4', 'U10'))
+ assert_(np.can_cast('i8', 'U21'))
+ assert_(not np.can_cast('i8', 'U20'))
+
+ assert_raises(TypeError, np.can_cast, 'i4', None)
+ assert_raises(TypeError, np.can_cast, None, 'i4')
+
+ # Also test keyword arguments
+ assert_(np.can_cast(from_=np.int32, to=np.int64))
+
+ def test_can_cast_values(self):
+ # gh-5917
+ for dt in np.sctypes['int'] + np.sctypes['uint']:
+ ii = np.iinfo(dt)
+ assert_(np.can_cast(ii.min, dt))
+ assert_(np.can_cast(ii.max, dt))
+ assert_(not np.can_cast(ii.min - 1, dt))
+ assert_(not np.can_cast(ii.max + 1, dt))
+
+ for dt in np.sctypes['float']:
+ fi = np.finfo(dt)
+ assert_(np.can_cast(fi.min, dt))
+ assert_(np.can_cast(fi.max, dt))
+
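+# Sketch (not part of the suite): the casting rules checked above form a
+# strictness hierarchy, 'no' < 'equiv' < 'safe' < 'same_kind' < 'unsafe';
+# a cast allowed at one level is allowed at every looser level.
+def _casting_hierarchy_sketch():
+    levels = ['no', 'equiv', 'safe', 'same_kind', 'unsafe']
+    # byte-swapped i8 -> i8 fails 'no' but passes everything looser
+    return [np.can_cast('<i8', '>i8', rule) for rule in levels]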
+
+# Custom exception class to test exception propagation in fromiter
+class NIterError(Exception):
+ pass
+
+
+class TestFromiter(object):
+ def makegen(self):
+ for x in range(24):
+ yield x**2
+
+ def test_types(self):
+ ai32 = np.fromiter(self.makegen(), np.int32)
+ ai64 = np.fromiter(self.makegen(), np.int64)
+ af = np.fromiter(self.makegen(), float)
+ assert_(ai32.dtype == np.dtype(np.int32))
+ assert_(ai64.dtype == np.dtype(np.int64))
+ assert_(af.dtype == np.dtype(float))
+
+ def test_lengths(self):
+ expected = np.array(list(self.makegen()))
+ a = np.fromiter(self.makegen(), int)
+ a20 = np.fromiter(self.makegen(), int, 20)
+ assert_(len(a) == len(expected))
+ assert_(len(a20) == 20)
+ assert_raises(ValueError, np.fromiter,
+ self.makegen(), int, len(expected) + 10)
+
+ def test_values(self):
+ expected = np.array(list(self.makegen()))
+ a = np.fromiter(self.makegen(), int)
+ a20 = np.fromiter(self.makegen(), int, 20)
+ assert_(np.alltrue(a == expected, axis=0))
+ assert_(np.alltrue(a20 == expected[:20], axis=0))
+
+ def load_data(self, n, eindex):
+ # Utility method for the issue 2592 tests.
+ # Raise an exception at the desired index in the iterator.
+ for e in range(n):
+ if e == eindex:
+ raise NIterError('error at index %s' % eindex)
+ yield e
+
+ def test_2592(self):
+ # Test iteration exceptions are correctly raised.
+ count, eindex = 10, 5
+ assert_raises(NIterError, np.fromiter,
+ self.load_data(count, eindex), dtype=int, count=count)
+
+ def test_2592_edge(self):
+ # Test iter. exceptions, edge case (exception at end of iterator).
+ count = 10
+ eindex = count-1
+ assert_raises(NIterError, np.fromiter,
+ self.load_data(count, eindex), dtype=int, count=count)
+
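+# A sketch of what TestFromiter exercises: np.fromiter consumes any iterable
+# (here a generator), and an optional count preallocates the result.
+#   >>> np.fromiter((x**2 for x in range(5)), dtype=int)
+#   array([ 0,  1,  4,  9, 16])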
+
+class TestNonzero(object):
+ def test_nonzero_trivial(self):
+ assert_equal(np.count_nonzero(np.array([])), 0)
+ assert_equal(np.count_nonzero(np.array([], dtype='?')), 0)
+ assert_equal(np.nonzero(np.array([])), ([],))
+
+ assert_equal(np.count_nonzero(np.array(0)), 0)
+ assert_equal(np.count_nonzero(np.array(0, dtype='?')), 0)
+ assert_equal(np.nonzero(np.array(0)), ([],))
+ assert_equal(np.count_nonzero(np.array(1)), 1)
+ assert_equal(np.count_nonzero(np.array(1, dtype='?')), 1)
+ assert_equal(np.nonzero(np.array(1)), ([0],))
+
+ def test_nonzero_onedim(self):
+ x = np.array([1, 0, 2, -1, 0, 0, 8])
+ assert_equal(np.count_nonzero(x), 4)
+ assert_equal(np.nonzero(x), ([0, 2, 3, 6],))
+
+ x = np.array([(1, 2), (0, 0), (1, 1), (-1, 3), (0, 7)],
+ dtype=[('a', 'i4'), ('b', 'i2')])
+ assert_equal(np.count_nonzero(x['a']), 3)
+ assert_equal(np.count_nonzero(x['b']), 4)
+ assert_equal(np.nonzero(x['a']), ([0, 2, 3],))
+ assert_equal(np.nonzero(x['b']), ([0, 2, 3, 4],))
+
+ def test_nonzero_twodim(self):
+ x = np.array([[0, 1, 0], [2, 0, 3]])
+ assert_equal(np.count_nonzero(x), 3)
+ assert_equal(np.nonzero(x), ([0, 1, 1], [1, 0, 2]))
+
+ x = np.eye(3)
+ assert_equal(np.count_nonzero(x), 3)
+ assert_equal(np.nonzero(x), ([0, 1, 2], [0, 1, 2]))
+
+ x = np.array([[(0, 1), (0, 0), (1, 11)],
+ [(1, 1), (1, 0), (0, 0)],
+ [(0, 0), (1, 5), (0, 1)]], dtype=[('a', 'f4'), ('b', 'u1')])
+ assert_equal(np.count_nonzero(x['a']), 4)
+ assert_equal(np.count_nonzero(x['b']), 5)
+ assert_equal(np.nonzero(x['a']), ([0, 1, 1, 2], [2, 0, 1, 1]))
+ assert_equal(np.nonzero(x['b']), ([0, 0, 1, 2, 2], [0, 2, 0, 1, 2]))
+
+ assert_(not x['a'].T.flags.aligned)
+ assert_equal(np.count_nonzero(x['a'].T), 4)
+ assert_equal(np.count_nonzero(x['b'].T), 5)
+ assert_equal(np.nonzero(x['a'].T), ([0, 1, 1, 2], [1, 1, 2, 0]))
+ assert_equal(np.nonzero(x['b'].T), ([0, 0, 1, 2, 2], [0, 1, 2, 0, 2]))
+
+ def test_sparse(self):
+ # test special sparse condition boolean code path
+ for i in range(20):
+ c = np.zeros(200, dtype=bool)
+ c[i::20] = True
+ assert_equal(np.nonzero(c)[0], np.arange(i, 200 + i, 20))
+
+ c = np.zeros(400, dtype=bool)
+ c[10 + i:20 + i] = True
+ c[20 + i*2] = True
+ assert_equal(np.nonzero(c)[0],
+ np.concatenate((np.arange(10 + i, 20 + i), [20 + i*2])))
+
+ def test_return_type(self):
+ class C(np.ndarray):
+ pass
+
+ for view in (C, np.ndarray):
+ for nd in range(1, 4):
+ shape = tuple(range(2, 2+nd))
+ x = np.arange(np.prod(shape)).reshape(shape).view(view)
+ for nzx in (np.nonzero(x), x.nonzero()):
+ for nzx_i in nzx:
+ assert_(type(nzx_i) is np.ndarray)
+ assert_(nzx_i.flags.writeable)
+
+ def test_count_nonzero_axis(self):
+ # Basic check of functionality
+ m = np.array([[0, 1, 7, 0, 0], [3, 0, 0, 2, 19]])
+
+ expected = np.array([1, 1, 1, 1, 1])
+ assert_equal(np.count_nonzero(m, axis=0), expected)
+
+ expected = np.array([2, 3])
+ assert_equal(np.count_nonzero(m, axis=1), expected)
+
+ assert_raises(ValueError, np.count_nonzero, m, axis=(1, 1))
+ assert_raises(TypeError, np.count_nonzero, m, axis='foo')
+ assert_raises(np.AxisError, np.count_nonzero, m, axis=3)
+ assert_raises(TypeError, np.count_nonzero,
+ m, axis=np.array([[1], [2]]))
+
+ def test_count_nonzero_axis_all_dtypes(self):
+ # More thorough test that the axis argument is respected
+ # for all dtypes and responds correctly when presented with
+ # either integer or tuple arguments for axis
+ msg = "Mismatch for dtype: %s"
+
+ def assert_equal_w_dt(a, b, err_msg):
+ assert_equal(a.dtype, b.dtype, err_msg=err_msg)
+ assert_equal(a, b, err_msg=err_msg)
+
+ for dt in np.typecodes['All']:
+ err_msg = msg % (np.dtype(dt).name,)
+
+ if dt != 'V':
+ if dt != 'M':
+ m = np.zeros((3, 3), dtype=dt)
+ n = np.ones(1, dtype=dt)
+
+ m[0, 0] = n[0]
+ m[1, 0] = n[0]
+
+ else: # np.zeros doesn't work for np.datetime64
+ m = np.array(['1970-01-01'] * 9)
+ m = m.reshape((3, 3))
+
+ m[0, 0] = '1970-01-12'
+ m[1, 0] = '1970-01-12'
+ m = m.astype(dt)
+
+ expected = np.array([2, 0, 0], dtype=np.intp)
+ assert_equal_w_dt(np.count_nonzero(m, axis=0),
+ expected, err_msg=err_msg)
+
+ expected = np.array([1, 1, 0], dtype=np.intp)
+ assert_equal_w_dt(np.count_nonzero(m, axis=1),
+ expected, err_msg=err_msg)
+
+ expected = np.array(2)
+ assert_equal(np.count_nonzero(m, axis=(0, 1)),
+ expected, err_msg=err_msg)
+ assert_equal(np.count_nonzero(m, axis=None),
+ expected, err_msg=err_msg)
+ assert_equal(np.count_nonzero(m),
+ expected, err_msg=err_msg)
+
+ if dt == 'V':
+ # There are no 'nonzero' objects for np.void, so the testing
+ # setup is slightly different for this dtype
+ m = np.array([np.void(1)] * 6).reshape((2, 3))
+
+ expected = np.array([0, 0, 0], dtype=np.intp)
+ assert_equal_w_dt(np.count_nonzero(m, axis=0),
+ expected, err_msg=err_msg)
+
+ expected = np.array([0, 0], dtype=np.intp)
+ assert_equal_w_dt(np.count_nonzero(m, axis=1),
+ expected, err_msg=err_msg)
+
+ expected = np.array(0)
+ assert_equal(np.count_nonzero(m, axis=(0, 1)),
+ expected, err_msg=err_msg)
+ assert_equal(np.count_nonzero(m, axis=None),
+ expected, err_msg=err_msg)
+ assert_equal(np.count_nonzero(m),
+ expected, err_msg=err_msg)
+
+ def test_count_nonzero_axis_consistent(self):
+ # Check that the axis behaviour for valid axes in
+ # non-special cases is consistent (and therefore
+ # correct) by checking it against an integer array
+ # that is then casted to the generic object dtype
+ from itertools import combinations, permutations
+
+ axis = (0, 1, 2, 3)
+ size = (5, 5, 5, 5)
+ msg = "Mismatch for axis: %s"
+
+ rng = np.random.RandomState(1234)
+ m = rng.randint(-100, 100, size=size)
+ n = m.astype(object)
+
+ for length in range(len(axis)):
+ for combo in combinations(axis, length):
+ for perm in permutations(combo):
+ assert_equal(
+ np.count_nonzero(m, axis=perm),
+ np.count_nonzero(n, axis=perm),
+ err_msg=msg % (perm,))
+
+ def test_countnonzero_axis_empty(self):
+ a = np.array([[0, 0, 1], [1, 0, 1]])
+ assert_equal(np.count_nonzero(a, axis=()), a.astype(bool))
+
+ def test_array_method(self):
+ # Tests that the array method
+ # call to nonzero works
+ m = np.array([[1, 0, 0], [4, 0, 6]])
+ tgt = [[0, 1, 1], [0, 0, 2]]
+
+ assert_equal(m.nonzero(), tgt)
+
+ def test_nonzero_invalid_object(self):
+ # gh-9295
+ a = np.array([np.array([1, 2]), 3])
+ assert_raises(ValueError, np.nonzero, a)
+
+ class BoolErrors:
+ def __bool__(self):
+ raise ValueError("Not allowed")
+ def __nonzero__(self):
+ raise ValueError("Not allowed")
+
+ assert_raises(ValueError, np.nonzero, np.array([BoolErrors()]))
+
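+# np.nonzero returns one index array per dimension, so the 2-D expectations
+# above pair row and column indices positionally:
+#   >>> np.nonzero(np.array([[0, 1, 0], [2, 0, 3]]))
+#   (array([0, 1, 1]), array([1, 0, 2]))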
+
+class TestIndex(object):
+ def test_boolean(self):
+ a = rand(3, 5, 8)
+ V = rand(5, 8)
+ g1 = randint(0, 5, size=15)
+ g2 = randint(0, 8, size=15)
+ V[g1, g2] = -V[g1, g2]
+ assert_((np.array([a[0][V > 0], a[1][V > 0], a[2][V > 0]]) == a[:, V > 0]).all())
+
+ def test_boolean_edgecase(self):
+ a = np.array([], dtype='int32')
+ b = np.array([], dtype='bool')
+ c = a[b]
+ assert_equal(c, [])
+ assert_equal(c.dtype, np.dtype('int32'))
+
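+# Boolean-mask indexing with a leading full slice, as in a[:, V > 0] above,
+# keeps the True positions for every leading index and flattens the masked
+# axes. A simpler 1-D-mask analogue:
+#   >>> np.arange(6).reshape(2, 3)[:, np.array([True, False, True])]
+#   array([[0, 2],
+#          [3, 5]])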
+
+class TestBinaryRepr(object):
+ def test_zero(self):
+ assert_equal(np.binary_repr(0), '0')
+
+ def test_positive(self):
+ assert_equal(np.binary_repr(10), '1010')
+ assert_equal(np.binary_repr(12522),
+ '11000011101010')
+ assert_equal(np.binary_repr(10736848),
+ '101000111101010011010000')
+
+ def test_negative(self):
+ assert_equal(np.binary_repr(-1), '-1')
+ assert_equal(np.binary_repr(-10), '-1010')
+ assert_equal(np.binary_repr(-12522),
+ '-11000011101010')
+ assert_equal(np.binary_repr(-10736848),
+ '-101000111101010011010000')
+
+ def test_sufficient_width(self):
+ assert_equal(np.binary_repr(0, width=5), '00000')
+ assert_equal(np.binary_repr(10, width=7), '0001010')
+ assert_equal(np.binary_repr(-5, width=7), '1111011')
+
+ def test_neg_width_boundaries(self):
+ # see gh-8670
+
+ # Ensure that the example in the issue does not
+ # break before proceeding to a more thorough test.
+ assert_equal(np.binary_repr(-128, width=8), '10000000')
+
+ for width in range(1, 11):
+ num = -2**(width - 1)
+ exp = '1' + (width - 1) * '0'
+ assert_equal(np.binary_repr(num, width=width), exp)
+
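+# With an explicit width, binary_repr renders negatives in two's complement:
+# -5 in 7 bits is 2**7 - 5 = 123 = 0b1111011, matching the assert above.
+#   >>> np.binary_repr(-128, width=8)
+#   '10000000'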
+
+class TestBaseRepr(object):
+ def test_base3(self):
+ assert_equal(np.base_repr(3**5, 3), '100000')
+
+ def test_positive(self):
+ assert_equal(np.base_repr(12, 10), '12')
+ assert_equal(np.base_repr(12, 10, 4), '000012')
+ assert_equal(np.base_repr(12, 4), '30')
+ assert_equal(np.base_repr(3731624803700888, 36), '10QR0ROFCEW')
+
+ def test_negative(self):
+ assert_equal(np.base_repr(-12, 10), '-12')
+ assert_equal(np.base_repr(-12, 10, 4), '-000012')
+ assert_equal(np.base_repr(-12, 4), '-30')
+
+ def test_base_range(self):
+ with assert_raises(ValueError):
+ np.base_repr(1, 1)
+ with assert_raises(ValueError):
+ np.base_repr(1, 37)
+
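+# base_repr accepts bases 2..36 (digits, then uppercase letters), hence the
+# ValueError checks for bases 1 and 37 above:
+#   >>> np.base_repr(12, 4)
+#   '30'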
+
+class TestArrayComparisons(object):
+ def test_array_equal(self):
+ res = np.array_equal(np.array([1, 2]), np.array([1, 2]))
+ assert_(res)
+ assert_(type(res) is bool)
+ res = np.array_equal(np.array([1, 2]), np.array([1, 2, 3]))
+ assert_(not res)
+ assert_(type(res) is bool)
+ res = np.array_equal(np.array([1, 2]), np.array([3, 4]))
+ assert_(not res)
+ assert_(type(res) is bool)
+ res = np.array_equal(np.array([1, 2]), np.array([1, 3]))
+ assert_(not res)
+ assert_(type(res) is bool)
+ res = np.array_equal(np.array(['a'], dtype='S1'), np.array(['a'], dtype='S1'))
+ assert_(res)
+ assert_(type(res) is bool)
+ res = np.array_equal(np.array([('a', 1)], dtype='S1,u4'),
+ np.array([('a', 1)], dtype='S1,u4'))
+ assert_(res)
+ assert_(type(res) is bool)
+
+ def test_none_compares_elementwise(self):
+ a = np.array([None, 1, None], dtype=object)
+ assert_equal(a == None, [True, False, True])
+ assert_equal(a != None, [False, True, False])
+
+ a = np.ones(3)
+ assert_equal(a == None, [False, False, False])
+ assert_equal(a != None, [True, True, True])
+
+ def test_array_equiv(self):
+ res = np.array_equiv(np.array([1, 2]), np.array([1, 2]))
+ assert_(res)
+ assert_(type(res) is bool)
+ res = np.array_equiv(np.array([1, 2]), np.array([1, 2, 3]))
+ assert_(not res)
+ assert_(type(res) is bool)
+ res = np.array_equiv(np.array([1, 2]), np.array([3, 4]))
+ assert_(not res)
+ assert_(type(res) is bool)
+ res = np.array_equiv(np.array([1, 2]), np.array([1, 3]))
+ assert_(not res)
+ assert_(type(res) is bool)
+
+ res = np.array_equiv(np.array([1, 1]), np.array([1]))
+ assert_(res)
+ assert_(type(res) is bool)
+ res = np.array_equiv(np.array([1, 1]), np.array([[1], [1]]))
+ assert_(res)
+ assert_(type(res) is bool)
+ res = np.array_equiv(np.array([1, 2]), np.array([2]))
+ assert_(not res)
+ assert_(type(res) is bool)
+ res = np.array_equiv(np.array([1, 2]), np.array([[1], [2]]))
+ assert_(not res)
+ assert_(type(res) is bool)
+ res = np.array_equiv(np.array([1, 2]), np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
+ assert_(not res)
+ assert_(type(res) is bool)
+
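+# array_equal demands matching shapes, while array_equiv only requires the
+# inputs to be broadcast-consistent and elementwise equal:
+#   >>> np.array_equiv(np.array([1, 2]), np.array([[1, 2], [1, 2]]))
+#   True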
+
+def assert_array_strict_equal(x, y):
+ assert_array_equal(x, y)
+ # Check flags, 32 bit arches typically don't provide 16 byte alignment
+ if ((x.dtype.alignment <= 8 or
+ np.intp().dtype.itemsize != 4) and
+ sys.platform != 'win32'):
+ assert_(x.flags == y.flags)
+ else:
+ assert_(x.flags.owndata == y.flags.owndata)
+ assert_(x.flags.writeable == y.flags.writeable)
+ assert_(x.flags.c_contiguous == y.flags.c_contiguous)
+ assert_(x.flags.f_contiguous == y.flags.f_contiguous)
+ assert_(x.flags.writebackifcopy == y.flags.writebackifcopy)
+ # check endianness
+ assert_(x.dtype.isnative == y.dtype.isnative)
+
+
+class TestClip(object):
+ def setup(self):
+ self.nr = 5
+ self.nc = 3
+
+ def fastclip(self, a, m, M, out=None):
+ if out is None:
+ return a.clip(m, M)
+ else:
+ return a.clip(m, M, out)
+
+ def clip(self, a, m, M, out=None):
+ # use slow-clip
+ selector = np.less(a, m) + 2*np.greater(a, M)
+ return selector.choose((a, m, M), out=out)
+
+ # Handy functions
+ def _generate_data(self, n, m):
+ return randn(n, m)
+
+ def _generate_data_complex(self, n, m):
+ return randn(n, m) + 1.j * rand(n, m)
+
+ def _generate_flt_data(self, n, m):
+ return (randn(n, m)).astype(np.float32)
+
+ def _neg_byteorder(self, a):
+ a = np.asarray(a)
+ if sys.byteorder == 'little':
+ a = a.astype(a.dtype.newbyteorder('>'))
+ else:
+ a = a.astype(a.dtype.newbyteorder('<'))
+ return a
+
+ def _generate_non_native_data(self, n, m):
+ data = randn(n, m)
+ data = self._neg_byteorder(data)
+ assert_(not data.dtype.isnative)
+ return data
+
+ def _generate_int_data(self, n, m):
+ return (10 * rand(n, m)).astype(np.int64)
+
+ def _generate_int32_data(self, n, m):
+ return (10 * rand(n, m)).astype(np.int32)
+
+ # Now the real test cases
+ def test_simple_double(self):
+ # Test native double input with scalar min/max.
+ a = self._generate_data(self.nr, self.nc)
+ m = 0.1
+ M = 0.6
+ ac = self.fastclip(a, m, M)
+ act = self.clip(a, m, M)
+ assert_array_strict_equal(ac, act)
+
+ def test_simple_int(self):
+ # Test native int input with scalar min/max.
+ a = self._generate_int_data(self.nr, self.nc)
+ a = a.astype(int)
+ m = -2
+ M = 4
+ ac = self.fastclip(a, m, M)
+ act = self.clip(a, m, M)
+ assert_array_strict_equal(ac, act)
+
+ def test_array_double(self):
+ # Test native double input with array min/max.
+ a = self._generate_data(self.nr, self.nc)
+ m = np.zeros(a.shape)
+ M = m + 0.5
+ ac = self.fastclip(a, m, M)
+ act = self.clip(a, m, M)
+ assert_array_strict_equal(ac, act)
+
+ def test_simple_nonnative(self):
+ # Test non native double input with scalar min/max.
+ # Test native double input with non native double scalar min/max.
+ a = self._generate_non_native_data(self.nr, self.nc)
+ m = -0.5
+ M = 0.6
+ ac = self.fastclip(a, m, M)
+ act = self.clip(a, m, M)
+ assert_array_equal(ac, act)
+
+ # Test native double input with non native double scalar min/max.
+ a = self._generate_data(self.nr, self.nc)
+ m = -0.5
+ M = self._neg_byteorder(0.6)
+ assert_(not M.dtype.isnative)
+ ac = self.fastclip(a, m, M)
+ act = self.clip(a, m, M)
+ assert_array_equal(ac, act)
+
+ def test_simple_complex(self):
+ # Test native complex input with native double scalar min/max.
+ # Test native input with complex double scalar min/max.
+ a = 3 * self._generate_data_complex(self.nr, self.nc)
+ m = -0.5
+ M = 1.
+ ac = self.fastclip(a, m, M)
+ act = self.clip(a, m, M)
+ assert_array_strict_equal(ac, act)
+
+ # Test native input with complex double scalar min/max.
+ a = 3 * self._generate_data(self.nr, self.nc)
+ m = -0.5 + 1.j
+ M = 1. + 2.j
+ ac = self.fastclip(a, m, M)
+ act = self.clip(a, m, M)
+ assert_array_strict_equal(ac, act)
+
+ def test_clip_complex(self):
+ # Address Issue gh-5354 for clipping complex arrays
+ # Test native complex input without explicit min/max
+        # i.e., either min=None or max=None
+ a = np.ones(10, dtype=complex)
+ m = a.min()
+ M = a.max()
+ am = self.fastclip(a, m, None)
+ aM = self.fastclip(a, None, M)
+ assert_array_strict_equal(am, a)
+ assert_array_strict_equal(aM, a)
+
+ def test_clip_non_contig(self):
+ # Test clip for non contiguous native input and native scalar min/max.
+ a = self._generate_data(self.nr * 2, self.nc * 3)
+ a = a[::2, ::3]
+ assert_(not a.flags['F_CONTIGUOUS'])
+ assert_(not a.flags['C_CONTIGUOUS'])
+ ac = self.fastclip(a, -1.6, 1.7)
+ act = self.clip(a, -1.6, 1.7)
+ assert_array_strict_equal(ac, act)
+
+ def test_simple_out(self):
+ # Test native double input with scalar min/max.
+ a = self._generate_data(self.nr, self.nc)
+ m = -0.5
+ M = 0.6
+ ac = np.zeros(a.shape)
+ act = np.zeros(a.shape)
+ self.fastclip(a, m, M, ac)
+ self.clip(a, m, M, act)
+ assert_array_strict_equal(ac, act)
+
+ def test_simple_int32_inout(self):
+ # Test native int32 input with double min/max and int32 out.
+ a = self._generate_int32_data(self.nr, self.nc)
+ m = np.float64(0)
+ M = np.float64(2)
+ ac = np.zeros(a.shape, dtype=np.int32)
+ act = ac.copy()
+ self.fastclip(a, m, M, ac)
+ self.clip(a, m, M, act)
+ assert_array_strict_equal(ac, act)
+
+ def test_simple_int64_out(self):
+ # Test native int32 input with int32 scalar min/max and int64 out.
+ a = self._generate_int32_data(self.nr, self.nc)
+ m = np.int32(-1)
+ M = np.int32(1)
+ ac = np.zeros(a.shape, dtype=np.int64)
+ act = ac.copy()
+ self.fastclip(a, m, M, ac)
+ self.clip(a, m, M, act)
+ assert_array_strict_equal(ac, act)
+
+ def test_simple_int64_inout(self):
+ # Test native int32 input with double array min/max and int32 out.
+ a = self._generate_int32_data(self.nr, self.nc)
+ m = np.zeros(a.shape, np.float64)
+ M = np.float64(1)
+ ac = np.zeros(a.shape, dtype=np.int32)
+ act = ac.copy()
+ self.fastclip(a, m, M, ac)
+ self.clip(a, m, M, act)
+ assert_array_strict_equal(ac, act)
+
+ def test_simple_int32_out(self):
+ # Test native double input with scalar min/max and int out.
+ a = self._generate_data(self.nr, self.nc)
+ m = -1.0
+ M = 2.0
+ ac = np.zeros(a.shape, dtype=np.int32)
+ act = ac.copy()
+ self.fastclip(a, m, M, ac)
+ self.clip(a, m, M, act)
+ assert_array_strict_equal(ac, act)
+
+ def test_simple_inplace_01(self):
+ # Test native double input with array min/max in-place.
+ a = self._generate_data(self.nr, self.nc)
+ ac = a.copy()
+ m = np.zeros(a.shape)
+ M = 1.0
+ self.fastclip(a, m, M, a)
+ self.clip(a, m, M, ac)
+ assert_array_strict_equal(a, ac)
+
+ def test_simple_inplace_02(self):
+ # Test native double input with scalar min/max in-place.
+ a = self._generate_data(self.nr, self.nc)
+ ac = a.copy()
+ m = -0.5
+ M = 0.6
+ self.fastclip(a, m, M, a)
+ self.clip(ac, m, M, ac)
+ assert_array_strict_equal(a, ac)
+
+ def test_noncontig_inplace(self):
+ # Test non contiguous double input with double scalar min/max in-place.
+ a = self._generate_data(self.nr * 2, self.nc * 3)
+ a = a[::2, ::3]
+ assert_(not a.flags['F_CONTIGUOUS'])
+ assert_(not a.flags['C_CONTIGUOUS'])
+ ac = a.copy()
+ m = -0.5
+ M = 0.6
+ self.fastclip(a, m, M, a)
+ self.clip(ac, m, M, ac)
+ assert_array_equal(a, ac)
+
+ def test_type_cast_01(self):
+ # Test native double input with scalar min/max.
+ a = self._generate_data(self.nr, self.nc)
+ m = -0.5
+ M = 0.6
+ ac = self.fastclip(a, m, M)
+ act = self.clip(a, m, M)
+ assert_array_strict_equal(ac, act)
+
+ def test_type_cast_02(self):
+ # Test native int32 input with int32 scalar min/max.
+ a = self._generate_int_data(self.nr, self.nc)
+ a = a.astype(np.int32)
+ m = -2
+ M = 4
+ ac = self.fastclip(a, m, M)
+ act = self.clip(a, m, M)
+ assert_array_strict_equal(ac, act)
+
+ def test_type_cast_03(self):
+ # Test native int32 input with float64 scalar min/max.
+ a = self._generate_int32_data(self.nr, self.nc)
+ m = -2
+ M = 4
+ ac = self.fastclip(a, np.float64(m), np.float64(M))
+ act = self.clip(a, np.float64(m), np.float64(M))
+ assert_array_strict_equal(ac, act)
+
+ def test_type_cast_04(self):
+ # Test native int32 input with float32 scalar min/max.
+ a = self._generate_int32_data(self.nr, self.nc)
+ m = np.float32(-2)
+ M = np.float32(4)
+ act = self.fastclip(a, m, M)
+ ac = self.clip(a, m, M)
+ assert_array_strict_equal(ac, act)
+
+ def test_type_cast_05(self):
+ # Test native int32 with double arrays min/max.
+ a = self._generate_int_data(self.nr, self.nc)
+ m = -0.5
+ M = 1.
+ ac = self.fastclip(a, m * np.zeros(a.shape), M)
+ act = self.clip(a, m * np.zeros(a.shape), M)
+ assert_array_strict_equal(ac, act)
+
+ def test_type_cast_06(self):
+ # Test native with NON native scalar min/max.
+ a = self._generate_data(self.nr, self.nc)
+ m = 0.5
+ m_s = self._neg_byteorder(m)
+ M = 1.
+ act = self.clip(a, m_s, M)
+ ac = self.fastclip(a, m_s, M)
+ assert_array_strict_equal(ac, act)
+
+ def test_type_cast_07(self):
+ # Test NON native with native array min/max.
+ a = self._generate_data(self.nr, self.nc)
+ m = -0.5 * np.ones(a.shape)
+ M = 1.
+ a_s = self._neg_byteorder(a)
+ assert_(not a_s.dtype.isnative)
+ act = a_s.clip(m, M)
+ ac = self.fastclip(a_s, m, M)
+ assert_array_strict_equal(ac, act)
+
+ def test_type_cast_08(self):
+ # Test NON native with native scalar min/max.
+ a = self._generate_data(self.nr, self.nc)
+ m = -0.5
+ M = 1.
+ a_s = self._neg_byteorder(a)
+ assert_(not a_s.dtype.isnative)
+ ac = self.fastclip(a_s, m, M)
+ act = a_s.clip(m, M)
+ assert_array_strict_equal(ac, act)
+
+ def test_type_cast_09(self):
+ # Test native with NON native array min/max.
+ a = self._generate_data(self.nr, self.nc)
+ m = -0.5 * np.ones(a.shape)
+ M = 1.
+ m_s = self._neg_byteorder(m)
+ assert_(not m_s.dtype.isnative)
+ ac = self.fastclip(a, m_s, M)
+ act = self.clip(a, m_s, M)
+ assert_array_strict_equal(ac, act)
+
+ def test_type_cast_10(self):
+        # Test native int64 input with float32 scalar min/max and float32 out.
+ a = self._generate_int_data(self.nr, self.nc)
+ b = np.zeros(a.shape, dtype=np.float32)
+ m = np.float32(-0.5)
+ M = np.float32(1)
+ act = self.clip(a, m, M, out=b)
+ ac = self.fastclip(a, m, M, out=b)
+ assert_array_strict_equal(ac, act)
+
+ def test_type_cast_11(self):
+ # Test non native with native scalar, min/max, out non native
+ a = self._generate_non_native_data(self.nr, self.nc)
+ b = a.copy()
+ b = b.astype(b.dtype.newbyteorder('>'))
+ bt = b.copy()
+ m = -0.5
+ M = 1.
+ self.fastclip(a, m, M, out=b)
+ self.clip(a, m, M, out=bt)
+ assert_array_strict_equal(b, bt)
+
+ def test_type_cast_12(self):
+        # Test native int64 input with int32 scalar min/max and float32 out
+ a = self._generate_int_data(self.nr, self.nc)
+ b = np.zeros(a.shape, dtype=np.float32)
+ m = np.int32(0)
+ M = np.int32(1)
+ act = self.clip(a, m, M, out=b)
+ ac = self.fastclip(a, m, M, out=b)
+ assert_array_strict_equal(ac, act)
+
+ def test_clip_with_out_simple(self):
+ # Test native double input with scalar min/max
+ a = self._generate_data(self.nr, self.nc)
+ m = -0.5
+ M = 0.6
+ ac = np.zeros(a.shape)
+ act = np.zeros(a.shape)
+ self.fastclip(a, m, M, ac)
+ self.clip(a, m, M, act)
+ assert_array_strict_equal(ac, act)
+
+ def test_clip_with_out_simple2(self):
+ # Test native int32 input with double min/max and int32 out
+ a = self._generate_int32_data(self.nr, self.nc)
+ m = np.float64(0)
+ M = np.float64(2)
+ ac = np.zeros(a.shape, dtype=np.int32)
+ act = ac.copy()
+ self.fastclip(a, m, M, ac)
+ self.clip(a, m, M, act)
+ assert_array_strict_equal(ac, act)
+
+ def test_clip_with_out_simple_int32(self):
+ # Test native int32 input with int32 scalar min/max and int64 out
+ a = self._generate_int32_data(self.nr, self.nc)
+ m = np.int32(-1)
+ M = np.int32(1)
+ ac = np.zeros(a.shape, dtype=np.int64)
+ act = ac.copy()
+ self.fastclip(a, m, M, ac)
+ self.clip(a, m, M, act)
+ assert_array_strict_equal(ac, act)
+
+ def test_clip_with_out_array_int32(self):
+ # Test native int32 input with double array min/max and int32 out
+ a = self._generate_int32_data(self.nr, self.nc)
+ m = np.zeros(a.shape, np.float64)
+ M = np.float64(1)
+ ac = np.zeros(a.shape, dtype=np.int32)
+ act = ac.copy()
+ self.fastclip(a, m, M, ac)
+ self.clip(a, m, M, act)
+ assert_array_strict_equal(ac, act)
+
+ def test_clip_with_out_array_outint32(self):
+ # Test native double input with scalar min/max and int out
+ a = self._generate_data(self.nr, self.nc)
+ m = -1.0
+ M = 2.0
+ ac = np.zeros(a.shape, dtype=np.int32)
+ act = ac.copy()
+ self.fastclip(a, m, M, ac)
+ self.clip(a, m, M, act)
+ assert_array_strict_equal(ac, act)
+
+ def test_clip_with_out_transposed(self):
+        # Test that the out argument works when transposed
+ a = np.arange(16).reshape(4, 4)
+ out = np.empty_like(a).T
+ a.clip(4, 10, out=out)
+ expected = self.clip(a, 4, 10)
+ assert_array_equal(out, expected)
+
+ def test_clip_with_out_memory_overlap(self):
+ # Test that the out argument works when it has memory overlap
+ a = np.arange(16).reshape(4, 4)
+ ac = a.copy()
+ a[:-1].clip(4, 10, out=a[1:])
+ expected = self.clip(ac[:-1], 4, 10)
+ assert_array_equal(a[1:], expected)
+
+ def test_clip_inplace_array(self):
+ # Test native double input with array min/max
+ a = self._generate_data(self.nr, self.nc)
+ ac = a.copy()
+ m = np.zeros(a.shape)
+ M = 1.0
+ self.fastclip(a, m, M, a)
+ self.clip(a, m, M, ac)
+ assert_array_strict_equal(a, ac)
+
+ def test_clip_inplace_simple(self):
+ # Test native double input with scalar min/max
+ a = self._generate_data(self.nr, self.nc)
+ ac = a.copy()
+ m = -0.5
+ M = 0.6
+ self.fastclip(a, m, M, a)
+ self.clip(a, m, M, ac)
+ assert_array_strict_equal(a, ac)
+
+ def test_clip_func_takes_out(self):
+ # Ensure that the clip() function takes an out=argument.
+ a = self._generate_data(self.nr, self.nc)
+ ac = a.copy()
+ m = -0.5
+ M = 0.6
+ a2 = np.clip(a, m, M, out=a)
+ self.clip(a, m, M, ac)
+ assert_array_strict_equal(a2, ac)
+ assert_(a2 is a)
+
+ def test_clip_nan(self):
+ d = np.arange(7.)
+ assert_equal(d.clip(min=np.nan), d)
+ assert_equal(d.clip(max=np.nan), d)
+ assert_equal(d.clip(min=np.nan, max=np.nan), d)
+ assert_equal(d.clip(min=-2, max=np.nan), d)
+ assert_equal(d.clip(min=np.nan, max=10), d)
+
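+# The slow reference clip above rebuilds a.clip(m, M) with choose: selector
+# is 0 where a is in range, 1 where a < m and 2 where a > M, picking from
+# (a, m, M) respectively. A quick check of the fast path:
+#   >>> np.clip(np.arange(5), 1, 3)
+#   array([1, 1, 2, 3, 3])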
+
+class TestAllclose(object):
+ rtol = 1e-5
+ atol = 1e-8
+
+ def setup(self):
+ self.olderr = np.seterr(invalid='ignore')
+
+ def teardown(self):
+ np.seterr(**self.olderr)
+
+ def tst_allclose(self, x, y):
+ assert_(np.allclose(x, y), "%s and %s not close" % (x, y))
+
+ def tst_not_allclose(self, x, y):
+ assert_(not np.allclose(x, y), "%s and %s shouldn't be close" % (x, y))
+
+ def test_ip_allclose(self):
+ # Parametric test factory.
+ arr = np.array([100, 1000])
+ aran = np.arange(125).reshape((5, 5, 5))
+
+ atol = self.atol
+ rtol = self.rtol
+
+ data = [([1, 0], [1, 0]),
+ ([atol], [0]),
+ ([1], [1+rtol+atol]),
+ (arr, arr + arr*rtol),
+ (arr, arr + arr*rtol + atol*2),
+ (aran, aran + aran*rtol),
+ (np.inf, np.inf),
+ (np.inf, [np.inf])]
+
+ for (x, y) in data:
+ self.tst_allclose(x, y)
+
+ def test_ip_not_allclose(self):
+ # Parametric test factory.
+ aran = np.arange(125).reshape((5, 5, 5))
+
+ atol = self.atol
+ rtol = self.rtol
+
+ data = [([np.inf, 0], [1, np.inf]),
+ ([np.inf, 0], [1, 0]),
+ ([np.inf, np.inf], [1, np.inf]),
+ ([np.inf, np.inf], [1, 0]),
+ ([-np.inf, 0], [np.inf, 0]),
+ ([np.nan, 0], [np.nan, 0]),
+ ([atol*2], [0]),
+ ([1], [1+rtol+atol*2]),
+ (aran, aran + aran*atol + atol*2),
+ (np.array([np.inf, 1]), np.array([0, np.inf]))]
+
+ for (x, y) in data:
+ self.tst_not_allclose(x, y)
+
+ def test_no_parameter_modification(self):
+ x = np.array([np.inf, 1])
+ y = np.array([0, np.inf])
+ np.allclose(x, y)
+ assert_array_equal(x, np.array([np.inf, 1]))
+ assert_array_equal(y, np.array([0, np.inf]))
+
+ def test_min_int(self):
+ # Could make problems because of abs(min_int) == min_int
+ min_int = np.iinfo(np.int_).min
+ a = np.array([min_int], dtype=np.int_)
+ assert_(np.allclose(a, a))
+
+ def test_equalnan(self):
+ x = np.array([1.0, np.nan])
+ assert_(np.allclose(x, x, equal_nan=True))
+
+ def test_return_class_is_ndarray(self):
+ # Issue gh-6475
+ # Check that allclose does not preserve subtypes
+ class Foo(np.ndarray):
+ def __new__(cls, *args, **kwargs):
+ return np.array(*args, **kwargs).view(cls)
+
+ a = Foo([1])
+ assert_(type(np.allclose(a, a)) is bool)
+
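+# allclose/isclose apply the asymmetric test |x - y| <= atol + rtol*|y|,
+# which is why ([1], [1+rtol+atol]) is close above while doubling atol is
+# not. From the numpy docs:
+#   >>> np.allclose([1e10, 1e-7], [1.00001e10, 1e-8])
+#   False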
+
+class TestIsclose(object):
+ rtol = 1e-5
+ atol = 1e-8
+
+ def setup(self):
+ atol = self.atol
+ rtol = self.rtol
+ arr = np.array([100, 1000])
+ aran = np.arange(125).reshape((5, 5, 5))
+
+ self.all_close_tests = [
+ ([1, 0], [1, 0]),
+ ([atol], [0]),
+ ([1], [1 + rtol + atol]),
+ (arr, arr + arr*rtol),
+ (arr, arr + arr*rtol + atol),
+ (aran, aran + aran*rtol),
+ (np.inf, np.inf),
+ (np.inf, [np.inf]),
+ ([np.inf, -np.inf], [np.inf, -np.inf]),
+ ]
+ self.none_close_tests = [
+ ([np.inf, 0], [1, np.inf]),
+ ([np.inf, -np.inf], [1, 0]),
+ ([np.inf, np.inf], [1, -np.inf]),
+ ([np.inf, np.inf], [1, 0]),
+ ([np.nan, 0], [np.nan, -np.inf]),
+ ([atol*2], [0]),
+ ([1], [1 + rtol + atol*2]),
+ (aran, aran + rtol*1.1*aran + atol*1.1),
+ (np.array([np.inf, 1]), np.array([0, np.inf])),
+ ]
+ self.some_close_tests = [
+ ([np.inf, 0], [np.inf, atol*2]),
+ ([atol, 1, 1e6*(1 + 2*rtol) + atol], [0, np.nan, 1e6]),
+ (np.arange(3), [0, 1, 2.1]),
+ (np.nan, [np.nan, np.nan, np.nan]),
+ ([0], [atol, np.inf, -np.inf, np.nan]),
+ (0, [atol, np.inf, -np.inf, np.nan]),
+ ]
+ self.some_close_results = [
+ [True, False],
+ [True, False, False],
+ [True, True, False],
+ [False, False, False],
+ [True, False, False, False],
+ [True, False, False, False],
+ ]
+
+ def test_ip_isclose(self):
+ self.setup()
+ tests = self.some_close_tests
+ results = self.some_close_results
+ for (x, y), result in zip(tests, results):
+ assert_array_equal(np.isclose(x, y), result)
+
+ def tst_all_isclose(self, x, y):
+ assert_(np.all(np.isclose(x, y)), "%s and %s not close" % (x, y))
+
+ def tst_none_isclose(self, x, y):
+ msg = "%s and %s shouldn't be close"
+ assert_(not np.any(np.isclose(x, y)), msg % (x, y))
+
+ def tst_isclose_allclose(self, x, y):
+ msg = "isclose.all() and allclose aren't same for %s and %s"
+ msg2 = "isclose and allclose aren't same for %s and %s"
+ if np.isscalar(x) and np.isscalar(y):
+ assert_(np.isclose(x, y) == np.allclose(x, y), msg=msg2 % (x, y))
+ else:
+ assert_array_equal(np.isclose(x, y).all(), np.allclose(x, y), msg % (x, y))
+
+ def test_ip_all_isclose(self):
+ self.setup()
+ for (x, y) in self.all_close_tests:
+ self.tst_all_isclose(x, y)
+
+ def test_ip_none_isclose(self):
+ self.setup()
+ for (x, y) in self.none_close_tests:
+ self.tst_none_isclose(x, y)
+
+ def test_ip_isclose_allclose(self):
+ self.setup()
+ tests = (self.all_close_tests + self.none_close_tests +
+ self.some_close_tests)
+ for (x, y) in tests:
+ self.tst_isclose_allclose(x, y)
+
+ def test_equal_nan(self):
+ assert_array_equal(np.isclose(np.nan, np.nan, equal_nan=True), [True])
+ arr = np.array([1.0, np.nan])
+ assert_array_equal(np.isclose(arr, arr, equal_nan=True), [True, True])
+
+ def test_masked_arrays(self):
+ # Make sure to test the output type when arguments are interchanged.
+
+ x = np.ma.masked_where([True, True, False], np.arange(3))
+ assert_(type(x) is type(np.isclose(2, x)))
+ assert_(type(x) is type(np.isclose(x, 2)))
+
+ x = np.ma.masked_where([True, True, False], [np.nan, np.inf, np.nan])
+ assert_(type(x) is type(np.isclose(np.inf, x)))
+ assert_(type(x) is type(np.isclose(x, np.inf)))
+
+ x = np.ma.masked_where([True, True, False], [np.nan, np.nan, np.nan])
+ y = np.isclose(np.nan, x, equal_nan=True)
+ assert_(type(x) is type(y))
+ # Ensure that the mask isn't modified...
+ assert_array_equal([True, True, False], y.mask)
+ y = np.isclose(x, np.nan, equal_nan=True)
+ assert_(type(x) is type(y))
+ # Ensure that the mask isn't modified...
+ assert_array_equal([True, True, False], y.mask)
+
+ x = np.ma.masked_where([True, True, False], [np.nan, np.nan, np.nan])
+ y = np.isclose(x, x, equal_nan=True)
+ assert_(type(x) is type(y))
+ # Ensure that the mask isn't modified...
+ assert_array_equal([True, True, False], y.mask)
+
+ def test_scalar_return(self):
+ assert_(np.isscalar(np.isclose(1, 1)))
+
+ def test_no_parameter_modification(self):
+ x = np.array([np.inf, 1])
+ y = np.array([0, np.inf])
+ np.isclose(x, y)
+ assert_array_equal(x, np.array([np.inf, 1]))
+ assert_array_equal(y, np.array([0, np.inf]))
+
+ def test_non_finite_scalar(self):
+ # GH7014, when two scalars are compared the output should also be a
+ # scalar
+ assert_(np.isclose(np.inf, -np.inf) is np.False_)
+ assert_(np.isclose(0, np.inf) is np.False_)
+ assert_(type(np.isclose(0, np.inf)) is np.bool_)
+
+
+class TestStdVar(object):
+ def setup(self):
+ self.A = np.array([1, -1, 1, -1])
+ self.real_var = 1
+
+ def test_basic(self):
+ assert_almost_equal(np.var(self.A), self.real_var)
+ assert_almost_equal(np.std(self.A)**2, self.real_var)
+
+ def test_scalars(self):
+ assert_equal(np.var(1), 0)
+ assert_equal(np.std(1), 0)
+
+ def test_ddof1(self):
+ assert_almost_equal(np.var(self.A, ddof=1),
+ self.real_var*len(self.A)/float(len(self.A)-1))
+ assert_almost_equal(np.std(self.A, ddof=1)**2,
+ self.real_var*len(self.A)/float(len(self.A)-1))
+
+ def test_ddof2(self):
+ assert_almost_equal(np.var(self.A, ddof=2),
+ self.real_var*len(self.A)/float(len(self.A)-2))
+ assert_almost_equal(np.std(self.A, ddof=2)**2,
+ self.real_var*len(self.A)/float(len(self.A)-2))
+
+ def test_out_scalar(self):
+ d = np.arange(10)
+ out = np.array(0.)
+ r = np.std(d, out=out)
+ assert_(r is out)
+ assert_array_equal(r, out)
+ r = np.var(d, out=out)
+ assert_(r is out)
+ assert_array_equal(r, out)
+ r = np.mean(d, out=out)
+ assert_(r is out)
+ assert_array_equal(r, out)
+
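+# ddof rescales the variance by n/(n - ddof); with the n = 4 samples above,
+# ddof=1 (Bessel's correction) multiplies the population variance by 4/3:
+#   >>> np.var([1, -1, 1, -1], ddof=1)
+#   1.3333333333333333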
+
+class TestStdVarComplex(object):
+ def test_basic(self):
+ A = np.array([1, 1.j, -1, -1.j])
+ real_var = 1
+ assert_almost_equal(np.var(A), real_var)
+ assert_almost_equal(np.std(A)**2, real_var)
+
+ def test_scalars(self):
+ assert_equal(np.var(1j), 0)
+ assert_equal(np.std(1j), 0)
+
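+# For complex input the variance averages |x - mean|**2, so the four unit
+# phasors above have mean 0 and variance exactly 1:
+#   >>> np.var(np.array([1, 1j, -1, -1j]))
+#   1.0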
+
+class TestCreationFuncs(object):
+ # Test ones, zeros, empty and full.
+
+ def setup(self):
+ dtypes = {np.dtype(tp) for tp in itertools.chain(*np.sctypes.values())}
+ # void, bytes, str
+ variable_sized = {tp for tp in dtypes if tp.str.endswith('0')}
+ self.dtypes = sorted(dtypes - variable_sized |
+ {np.dtype(tp.str.replace("0", str(i)))
+ for tp in variable_sized for i in range(1, 10)},
+ key=lambda dtype: dtype.str)
+ self.orders = {'C': 'c_contiguous', 'F': 'f_contiguous'}
+ self.ndims = 10
+
+ def check_function(self, func, fill_value=None):
+ par = ((0, 1, 2),
+ range(self.ndims),
+ self.orders,
+ self.dtypes)
+ fill_kwarg = {}
+ if fill_value is not None:
+ fill_kwarg = {'fill_value': fill_value}
+
+ for size, ndims, order, dtype in itertools.product(*par):
+ shape = ndims * [size]
+
+ # do not fill void type
+ if fill_kwarg and dtype.str.startswith('|V'):
+ continue
+
+ arr = func(shape, order=order, dtype=dtype,
+ **fill_kwarg)
+
+ assert_equal(arr.dtype, dtype)
+ assert_(getattr(arr.flags, self.orders[order]))
+
+ if fill_value is not None:
+ if dtype.str.startswith('|S'):
+ val = str(fill_value)
+ else:
+ val = fill_value
+ assert_equal(arr, dtype.type(val))
+
+ def test_zeros(self):
+ self.check_function(np.zeros)
+
+ def test_ones(self):
+        self.check_function(np.ones)
+
+ def test_empty(self):
+ self.check_function(np.empty)
+
+ def test_full(self):
+ self.check_function(np.full, 0)
+ self.check_function(np.full, 1)
+
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+ def test_for_reference_leak(self):
+ # Make sure we have an object for reference
+ dim = 1
+ beg = sys.getrefcount(dim)
+ np.zeros([dim]*10)
+ assert_(sys.getrefcount(dim) == beg)
+ np.ones([dim]*10)
+ assert_(sys.getrefcount(dim) == beg)
+ np.empty([dim]*10)
+ assert_(sys.getrefcount(dim) == beg)
+ np.full([dim]*10, 0)
+ assert_(sys.getrefcount(dim) == beg)
+
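+# np.full is the parameterized sibling of zeros/ones checked above:
+#   >>> np.full((2, 2), 7)
+#   array([[7, 7],
+#          [7, 7]])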
+
+class TestLikeFuncs(object):
+ '''Test ones_like, zeros_like, empty_like and full_like'''
+
+ def setup(self):
+ self.data = [
+ # Array scalars
+ (np.array(3.), None),
+ (np.array(3), 'f8'),
+ # 1D arrays
+ (np.arange(6, dtype='f4'), None),
+ (np.arange(6), 'c16'),
+ # 2D C-layout arrays
+ (np.arange(6).reshape(2, 3), None),
+ (np.arange(6).reshape(3, 2), 'i1'),
+ # 2D F-layout arrays
+ (np.arange(6).reshape((2, 3), order='F'), None),
+ (np.arange(6).reshape((3, 2), order='F'), 'i1'),
+ # 3D C-layout arrays
+ (np.arange(24).reshape(2, 3, 4), None),
+ (np.arange(24).reshape(4, 3, 2), 'f4'),
+ # 3D F-layout arrays
+ (np.arange(24).reshape((2, 3, 4), order='F'), None),
+ (np.arange(24).reshape((4, 3, 2), order='F'), 'f4'),
+ # 3D non-C/F-layout arrays
+ (np.arange(24).reshape(2, 3, 4).swapaxes(0, 1), None),
+ (np.arange(24).reshape(4, 3, 2).swapaxes(0, 1), '?'),
+ ]
+
+ def compare_array_value(self, dz, value, fill_value):
+ if value is not None:
+ if fill_value:
+ try:
+ z = dz.dtype.type(value)
+ except OverflowError:
+ pass
+ else:
+ assert_(np.all(dz == z))
+ else:
+ assert_(np.all(dz == value))
+
+ def check_like_function(self, like_function, value, fill_value=False):
+ if fill_value:
+ fill_kwarg = {'fill_value': value}
+ else:
+ fill_kwarg = {}
+ for d, dtype in self.data:
+ # default (K) order, dtype
+ dz = like_function(d, dtype=dtype, **fill_kwarg)
+ assert_equal(dz.shape, d.shape)
+ assert_equal(np.array(dz.strides)*d.dtype.itemsize,
+ np.array(d.strides)*dz.dtype.itemsize)
+ assert_equal(d.flags.c_contiguous, dz.flags.c_contiguous)
+ assert_equal(d.flags.f_contiguous, dz.flags.f_contiguous)
+ if dtype is None:
+ assert_equal(dz.dtype, d.dtype)
+ else:
+ assert_equal(dz.dtype, np.dtype(dtype))
+ self.compare_array_value(dz, value, fill_value)
+
+ # C order, default dtype
+ dz = like_function(d, order='C', dtype=dtype, **fill_kwarg)
+ assert_equal(dz.shape, d.shape)
+ assert_(dz.flags.c_contiguous)
+ if dtype is None:
+ assert_equal(dz.dtype, d.dtype)
+ else:
+ assert_equal(dz.dtype, np.dtype(dtype))
+ self.compare_array_value(dz, value, fill_value)
+
+ # F order, default dtype
+ dz = like_function(d, order='F', dtype=dtype, **fill_kwarg)
+ assert_equal(dz.shape, d.shape)
+ assert_(dz.flags.f_contiguous)
+ if dtype is None:
+ assert_equal(dz.dtype, d.dtype)
+ else:
+ assert_equal(dz.dtype, np.dtype(dtype))
+ self.compare_array_value(dz, value, fill_value)
+
+ # A order
+ dz = like_function(d, order='A', dtype=dtype, **fill_kwarg)
+ assert_equal(dz.shape, d.shape)
+ if d.flags.f_contiguous:
+ assert_(dz.flags.f_contiguous)
+ else:
+ assert_(dz.flags.c_contiguous)
+ if dtype is None:
+ assert_equal(dz.dtype, d.dtype)
+ else:
+ assert_equal(dz.dtype, np.dtype(dtype))
+ self.compare_array_value(dz, value, fill_value)
+
+ # Test the 'subok' parameter
+ class MyNDArray(np.ndarray):
+ pass
+
+ a = np.array([[1, 2], [3, 4]]).view(MyNDArray)
+
+ b = like_function(a, **fill_kwarg)
+ assert_(type(b) is MyNDArray)
+
+ b = like_function(a, subok=False, **fill_kwarg)
+ assert_(type(b) is not MyNDArray)
+
+ def test_ones_like(self):
+ self.check_like_function(np.ones_like, 1)
+
+ def test_zeros_like(self):
+ self.check_like_function(np.zeros_like, 0)
+
+ def test_empty_like(self):
+ self.check_like_function(np.empty_like, None)
+
+ def test_filled_like(self):
+ self.check_like_function(np.full_like, 0, True)
+ self.check_like_function(np.full_like, 1, True)
+ self.check_like_function(np.full_like, 1000, True)
+ self.check_like_function(np.full_like, 123.456, True)
+ self.check_like_function(np.full_like, np.inf, True)
+
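+# The *_like functions default to order='K', preserving the prototype's
+# layout; that is why the stride comparison above rescales strides by the
+# respective itemsizes.
+#   >>> np.ones_like(np.arange(6).reshape((2, 3), order='F')).flags['F']
+#   True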
+
+class TestCorrelate(object):
+ def _setup(self, dt):
+ self.x = np.array([1, 2, 3, 4, 5], dtype=dt)
+ self.xs = np.arange(1, 20)[::3]
+ self.y = np.array([-1, -2, -3], dtype=dt)
+        self.z1 = np.array([-3., -8., -14., -20., -26., -14., -5.], dtype=dt)
+ self.z1_4 = np.array([-2., -5., -8., -11., -14., -5.], dtype=dt)
+ self.z1r = np.array([-15., -22., -22., -16., -10., -4., -1.], dtype=dt)
+ self.z2 = np.array([-5., -14., -26., -20., -14., -8., -3.], dtype=dt)
+ self.z2r = np.array([-1., -4., -10., -16., -22., -22., -15.], dtype=dt)
+ self.zs = np.array([-3., -14., -30., -48., -66., -84.,
+ -102., -54., -19.], dtype=dt)
+
+ def test_float(self):
+ self._setup(float)
+ z = np.correlate(self.x, self.y, 'full')
+ assert_array_almost_equal(z, self.z1)
+ z = np.correlate(self.x, self.y[:-1], 'full')
+ assert_array_almost_equal(z, self.z1_4)
+ z = np.correlate(self.y, self.x, 'full')
+ assert_array_almost_equal(z, self.z2)
+ z = np.correlate(self.x[::-1], self.y, 'full')
+ assert_array_almost_equal(z, self.z1r)
+ z = np.correlate(self.y, self.x[::-1], 'full')
+ assert_array_almost_equal(z, self.z2r)
+ z = np.correlate(self.xs, self.y, 'full')
+ assert_array_almost_equal(z, self.zs)
+
+ def test_object(self):
+ self._setup(Decimal)
+ z = np.correlate(self.x, self.y, 'full')
+ assert_array_almost_equal(z, self.z1)
+ z = np.correlate(self.y, self.x, 'full')
+ assert_array_almost_equal(z, self.z2)
+
+ def test_no_overwrite(self):
+ d = np.ones(100)
+ k = np.ones(3)
+ np.correlate(d, k)
+ assert_array_equal(d, np.ones(100))
+ assert_array_equal(k, np.ones(3))
+
+ def test_complex(self):
+ x = np.array([1, 2, 3, 4+1j], dtype=complex)
+ y = np.array([-1, -2j, 3+1j], dtype=complex)
+ r_z = np.array([3-1j, 6, 8+1j, 11+5j, -5+8j, -4-1j], dtype=complex)
+ r_z = r_z[::-1].conjugate()
+ z = np.correlate(y, x, mode='full')
+ assert_array_almost_equal(z, r_z)
+
+
+class TestConvolve(object):
+ def test_object(self):
+ d = [1.] * 100
+ k = [1.] * 3
+ assert_array_almost_equal(np.convolve(d, k)[2:-2], np.full(98, 3))
+
+ def test_no_overwrite(self):
+ d = np.ones(100)
+ k = np.ones(3)
+ np.convolve(d, k)
+ assert_array_equal(d, np.ones(100))
+ assert_array_equal(k, np.ones(3))
+
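+# For 1-D inputs, convolution is correlation against the reversed (and
+# conjugated) kernel: np.convolve(a, v) matches
+# np.correlate(a, v[::-1].conj(), 'full').
+#   >>> np.convolve([1, 2, 3], [0, 1, 0.5])
+#   array([0. , 1. , 2.5, 4. , 1.5])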
+
+class TestArgwhere(object):
+ def test_2D(self):
+ x = np.arange(6).reshape((2, 3))
+ assert_array_equal(np.argwhere(x > 1),
+ [[0, 2],
+ [1, 0],
+ [1, 1],
+ [1, 2]])
+
+ def test_list(self):
+ assert_equal(np.argwhere([4, 0, 2, 1, 3]), [[0], [2], [3], [4]])
+
+
+class TestStringFunction(object):
+
+ def test_set_string_function(self):
+ a = np.array([1])
+ np.set_string_function(lambda x: "FOO", repr=True)
+ assert_equal(repr(a), "FOO")
+ np.set_string_function(None, repr=True)
+ assert_equal(repr(a), "array([1])")
+
+ np.set_string_function(lambda x: "FOO", repr=False)
+ assert_equal(str(a), "FOO")
+ np.set_string_function(None, repr=False)
+ assert_equal(str(a), "[1]")
+
+
+class TestRoll(object):
+ def test_roll1d(self):
+ x = np.arange(10)
+ xr = np.roll(x, 2)
+ assert_equal(xr, np.array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7]))
+
+ def test_roll2d(self):
+ x2 = np.reshape(np.arange(10), (2, 5))
+ x2r = np.roll(x2, 1)
+ assert_equal(x2r, np.array([[9, 0, 1, 2, 3], [4, 5, 6, 7, 8]]))
+
+ x2r = np.roll(x2, 1, axis=0)
+ assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]))
+
+ x2r = np.roll(x2, 1, axis=1)
+ assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
+
+ # Roll multiple axes at once.
+ x2r = np.roll(x2, 1, axis=(0, 1))
+ assert_equal(x2r, np.array([[9, 5, 6, 7, 8], [4, 0, 1, 2, 3]]))
+
+ x2r = np.roll(x2, (1, 0), axis=(0, 1))
+ assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]))
+
+ x2r = np.roll(x2, (-1, 0), axis=(0, 1))
+ assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]))
+
+ x2r = np.roll(x2, (0, 1), axis=(0, 1))
+ assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
+
+ x2r = np.roll(x2, (0, -1), axis=(0, 1))
+ assert_equal(x2r, np.array([[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]]))
+
+ x2r = np.roll(x2, (1, 1), axis=(0, 1))
+ assert_equal(x2r, np.array([[9, 5, 6, 7, 8], [4, 0, 1, 2, 3]]))
+
+ x2r = np.roll(x2, (-1, -1), axis=(0, 1))
+ assert_equal(x2r, np.array([[6, 7, 8, 9, 5], [1, 2, 3, 4, 0]]))
+
+ # Roll the same axis multiple times.
+ x2r = np.roll(x2, 1, axis=(0, 0))
+ assert_equal(x2r, np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]))
+
+ x2r = np.roll(x2, 1, axis=(1, 1))
+ assert_equal(x2r, np.array([[3, 4, 0, 1, 2], [8, 9, 5, 6, 7]]))
+
+ # Roll more than one turn in either direction.
+ x2r = np.roll(x2, 6, axis=1)
+ assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
+
+ x2r = np.roll(x2, -4, axis=1)
+ assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
+
+ def test_roll_empty(self):
+ x = np.array([])
+ assert_equal(np.roll(x, 1), np.array([]))
+
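+# np.roll with axis=None flattens, shifts, then reshapes; an axis tuple
+# applies the shift per axis, and a repeated axis accumulates, so shift 1 on
+# axis=(1, 1) above equals a single shift of 2.
+#   >>> np.roll(np.arange(10), 2)
+#   array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])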
+
+class TestRollaxis(object):
+
+ # expected shape indexed by (axis, start) for array of
+ # shape (1, 2, 3, 4)
+ tgtshape = {(0, 0): (1, 2, 3, 4), (0, 1): (1, 2, 3, 4),
+ (0, 2): (2, 1, 3, 4), (0, 3): (2, 3, 1, 4),
+ (0, 4): (2, 3, 4, 1),
+ (1, 0): (2, 1, 3, 4), (1, 1): (1, 2, 3, 4),
+ (1, 2): (1, 2, 3, 4), (1, 3): (1, 3, 2, 4),
+ (1, 4): (1, 3, 4, 2),
+ (2, 0): (3, 1, 2, 4), (2, 1): (1, 3, 2, 4),
+ (2, 2): (1, 2, 3, 4), (2, 3): (1, 2, 3, 4),
+ (2, 4): (1, 2, 4, 3),
+ (3, 0): (4, 1, 2, 3), (3, 1): (1, 4, 2, 3),
+ (3, 2): (1, 2, 4, 3), (3, 3): (1, 2, 3, 4),
+ (3, 4): (1, 2, 3, 4)}
+
+ def test_exceptions(self):
+ a = np.arange(1*2*3*4).reshape(1, 2, 3, 4)
+ assert_raises(np.AxisError, np.rollaxis, a, -5, 0)
+ assert_raises(np.AxisError, np.rollaxis, a, 0, -5)
+ assert_raises(np.AxisError, np.rollaxis, a, 4, 0)
+ assert_raises(np.AxisError, np.rollaxis, a, 0, 5)
+
+ def test_results(self):
+ a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
+ aind = np.indices(a.shape)
+ assert_(a.flags['OWNDATA'])
+ for (i, j) in self.tgtshape:
+ # positive axis, positive start
+ res = np.rollaxis(a, axis=i, start=j)
+ i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
+ assert_(np.all(res[i0, i1, i2, i3] == a))
+ assert_(res.shape == self.tgtshape[(i, j)], str((i,j)))
+ assert_(not res.flags['OWNDATA'])
+
+ # negative axis, positive start
+ ip = i + 1
+ res = np.rollaxis(a, axis=-ip, start=j)
+ i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
+ assert_(np.all(res[i0, i1, i2, i3] == a))
+ assert_(res.shape == self.tgtshape[(4 - ip, j)])
+ assert_(not res.flags['OWNDATA'])
+
+ # positive axis, negative start
+ jp = j + 1 if j < 4 else j
+ res = np.rollaxis(a, axis=i, start=-jp)
+ i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
+ assert_(np.all(res[i0, i1, i2, i3] == a))
+ assert_(res.shape == self.tgtshape[(i, 4 - jp)])
+ assert_(not res.flags['OWNDATA'])
+
+ # negative axis, negative start
+ ip = i + 1
+ jp = j + 1 if j < 4 else j
+ res = np.rollaxis(a, axis=-ip, start=-jp)
+ i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
+ assert_(np.all(res[i0, i1, i2, i3] == a))
+ assert_(res.shape == self.tgtshape[(4 - ip, 4 - jp)])
+ assert_(not res.flags['OWNDATA'])
+
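+# np.moveaxis is the clearer successor to rollaxis: source axes go to
+# explicit destinations and the remaining axes keep their original order.
+#   >>> np.moveaxis(np.zeros((3, 4, 5)), 0, -1).shape
+#   (4, 5, 3)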
+
+class TestMoveaxis(object):
+ def test_move_to_end(self):
+ x = np.random.randn(5, 6, 7)
+ for source, expected in [(0, (6, 7, 5)),
+ (1, (5, 7, 6)),
+ (2, (5, 6, 7)),
+ (-1, (5, 6, 7))]:
+ actual = np.moveaxis(x, source, -1).shape
+            assert_equal(actual, expected)
+
+ def test_move_new_position(self):
+ x = np.random.randn(1, 2, 3, 4)
+ for source, destination, expected in [
+ (0, 1, (2, 1, 3, 4)),
+ (1, 2, (1, 3, 2, 4)),
+ (1, -1, (1, 3, 4, 2)),
+ ]:
+ actual = np.moveaxis(x, source, destination).shape
+            assert_equal(actual, expected)
+
+ def test_preserve_order(self):
+ x = np.zeros((1, 2, 3, 4))
+ for source, destination in [
+ (0, 0),
+ (3, -1),
+ (-1, 3),
+ ([0, -1], [0, -1]),
+ ([2, 0], [2, 0]),
+ (range(4), range(4)),
+ ]:
+ actual = np.moveaxis(x, source, destination).shape
+            assert_equal(actual, (1, 2, 3, 4))
+
+ def test_move_multiples(self):
+ x = np.zeros((0, 1, 2, 3))
+ for source, destination, expected in [
+ ([0, 1], [2, 3], (2, 3, 0, 1)),
+ ([2, 3], [0, 1], (2, 3, 0, 1)),
+ ([0, 1, 2], [2, 3, 0], (2, 3, 0, 1)),
+ ([3, 0], [1, 0], (0, 3, 1, 2)),
+ ([0, 3], [0, 1], (0, 3, 1, 2)),
+ ]:
+ actual = np.moveaxis(x, source, destination).shape
+            assert_equal(actual, expected)
+
+ def test_errors(self):
+ x = np.random.randn(1, 2, 3)
+ assert_raises_regex(np.AxisError, 'source.*out of bounds',
+ np.moveaxis, x, 3, 0)
+ assert_raises_regex(np.AxisError, 'source.*out of bounds',
+ np.moveaxis, x, -4, 0)
+ assert_raises_regex(np.AxisError, 'destination.*out of bounds',
+ np.moveaxis, x, 0, 5)
+ assert_raises_regex(ValueError, 'repeated axis in `source`',
+ np.moveaxis, x, [0, 0], [0, 1])
+ assert_raises_regex(ValueError, 'repeated axis in `destination`',
+ np.moveaxis, x, [0, 1], [1, 1])
+ assert_raises_regex(ValueError, 'must have the same number',
+ np.moveaxis, x, 0, [0, 1])
+ assert_raises_regex(ValueError, 'must have the same number',
+ np.moveaxis, x, [0, 1], [0])
+
+ def test_array_likes(self):
+ x = np.ma.zeros((1, 2, 3))
+ result = np.moveaxis(x, 0, 0)
+        assert_equal(x.shape, result.shape)
+ assert_(isinstance(result, np.ma.MaskedArray))
+
+ x = [1, 2, 3]
+ result = np.moveaxis(x, 0, 0)
+        assert_equal(x, list(result))
+ assert_(isinstance(result, np.ndarray))
+
+
+class TestCross(object):
+ def test_2x2(self):
+ u = [1, 2]
+ v = [3, 4]
+ z = -2
+ cp = np.cross(u, v)
+ assert_equal(cp, z)
+ cp = np.cross(v, u)
+ assert_equal(cp, -z)
+
+ def test_2x3(self):
+ u = [1, 2]
+ v = [3, 4, 5]
+ z = np.array([10, -5, -2])
+ cp = np.cross(u, v)
+ assert_equal(cp, z)
+ cp = np.cross(v, u)
+ assert_equal(cp, -z)
+
+ def test_3x3(self):
+ u = [1, 2, 3]
+ v = [4, 5, 6]
+ z = np.array([-3, 6, -3])
+ cp = np.cross(u, v)
+ assert_equal(cp, z)
+ cp = np.cross(v, u)
+ assert_equal(cp, -z)
+
+ def test_broadcasting(self):
+ # Ticket #2624 (Trac #2032)
+ u = np.tile([1, 2], (11, 1))
+ v = np.tile([3, 4], (11, 1))
+ z = -2
+ assert_equal(np.cross(u, v), z)
+ assert_equal(np.cross(v, u), -z)
+ assert_equal(np.cross(u, u), 0)
+
+ u = np.tile([1, 2], (11, 1)).T
+ v = np.tile([3, 4, 5], (11, 1))
+ z = np.tile([10, -5, -2], (11, 1))
+ assert_equal(np.cross(u, v, axisa=0), z)
+ assert_equal(np.cross(v, u.T), -z)
+ assert_equal(np.cross(v, v), 0)
+
+ u = np.tile([1, 2, 3], (11, 1)).T
+ v = np.tile([3, 4], (11, 1)).T
+ z = np.tile([-12, 9, -2], (11, 1))
+ assert_equal(np.cross(u, v, axisa=0, axisb=0), z)
+ assert_equal(np.cross(v.T, u.T), -z)
+ assert_equal(np.cross(u.T, u.T), 0)
+
+ u = np.tile([1, 2, 3], (5, 1))
+ v = np.tile([4, 5, 6], (5, 1)).T
+ z = np.tile([-3, 6, -3], (5, 1))
+ assert_equal(np.cross(u, v, axisb=0), z)
+ assert_equal(np.cross(v.T, u), -z)
+ assert_equal(np.cross(u, u), 0)
+
+ def test_broadcasting_shapes(self):
+ u = np.ones((2, 1, 3))
+ v = np.ones((5, 3))
+ assert_equal(np.cross(u, v).shape, (2, 5, 3))
+ u = np.ones((10, 3, 5))
+ v = np.ones((2, 5))
+ assert_equal(np.cross(u, v, axisa=1, axisb=0).shape, (10, 5, 3))
+ assert_raises(np.AxisError, np.cross, u, v, axisa=1, axisb=2)
+ assert_raises(np.AxisError, np.cross, u, v, axisa=3, axisb=0)
+ u = np.ones((10, 3, 5, 7))
+ v = np.ones((5, 7, 2))
+ assert_equal(np.cross(u, v, axisa=1, axisc=2).shape, (10, 5, 3, 7))
+ assert_raises(np.AxisError, np.cross, u, v, axisa=-5, axisb=2)
+ assert_raises(np.AxisError, np.cross, u, v, axisa=1, axisb=-4)
+ # gh-5885
+ u = np.ones((3, 4, 2))
+ for axisc in range(-2, 2):
+ assert_equal(np.cross(u, u, axisc=axisc).shape, (3, 4))
+
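+# For a pair of 2-vectors np.cross returns the scalar z-component
+# u0*v1 - u1*v0 (hence -2 for u=[1, 2], v=[3, 4] above); a 2-vector mixed
+# with a 3-vector is zero-padded to three components first.
+#   >>> np.cross([1, 2, 3], [4, 5, 6])
+#   array([-3,  6, -3])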
+
+def test_outer_out_param():
+ arr1 = np.ones((5,))
+ arr2 = np.ones((2,))
+ arr3 = np.linspace(-2, 2, 5)
+    out1 = np.ndarray(shape=(5, 5))
+ out2 = np.ndarray(shape=(2, 5))
+ res1 = np.outer(arr1, arr3, out1)
+ assert_equal(res1, out1)
+ assert_equal(np.outer(arr2, arr3, out2), out2)
+
+
+class TestRequire(object):
+ flag_names = ['C', 'C_CONTIGUOUS', 'CONTIGUOUS',
+ 'F', 'F_CONTIGUOUS', 'FORTRAN',
+ 'A', 'ALIGNED',
+ 'W', 'WRITEABLE',
+ 'O', 'OWNDATA']
+
+ def generate_all_false(self, dtype):
+ arr = np.zeros((2, 2), [('junk', 'i1'), ('a', dtype)])
+ arr.setflags(write=False)
+ a = arr['a']
+ assert_(not a.flags['C'])
+ assert_(not a.flags['F'])
+ assert_(not a.flags['O'])
+ assert_(not a.flags['W'])
+ assert_(not a.flags['A'])
+ return a
+
+ def set_and_check_flag(self, flag, dtype, arr):
+ if dtype is None:
+ dtype = arr.dtype
+ b = np.require(arr, dtype, [flag])
+ assert_(b.flags[flag])
+ assert_(b.dtype == dtype)
+
+ # a further call to np.require ought to return the same array
+ # unless OWNDATA is specified.
+ c = np.require(b, None, [flag])
+ if flag[0] != 'O':
+ assert_(c is b)
+ else:
+ assert_(c.flags[flag])
+
+ def test_require_each(self):
+
+ id = ['f8', 'i4']
+ fd = [None, 'f8', 'c16']
+ for idtype, fdtype, flag in itertools.product(id, fd, self.flag_names):
+ a = self.generate_all_false(idtype)
+ self.set_and_check_flag(flag, fdtype, a)
+
+ def test_unknown_requirement(self):
+ a = self.generate_all_false('f8')
+ assert_raises(KeyError, np.require, a, None, 'Q')
+
+ def test_non_array_input(self):
+ a = np.require([1, 2, 3, 4], 'i4', ['C', 'A', 'O'])
+ assert_(a.flags['O'])
+ assert_(a.flags['C'])
+ assert_(a.flags['A'])
+ assert_(a.dtype == 'i4')
+ assert_equal(a, [1, 2, 3, 4])
+
+ def test_C_and_F_simul(self):
+ a = self.generate_all_false('f8')
+ assert_raises(ValueError, np.require, a, None, ['C', 'F'])
+
+ def test_ensure_array(self):
+ class ArraySubclass(np.ndarray):
+ pass
+
+ a = ArraySubclass((2, 2))
+ b = np.require(a, None, ['E'])
+ assert_(type(b) is np.ndarray)
+
+ def test_preserve_subtype(self):
+ class ArraySubclass(np.ndarray):
+ pass
+
+ for flag in self.flag_names:
+ a = ArraySubclass((2, 2))
+ self.set_and_check_flag(flag, None, a)
+
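+# np.require returns its input unchanged when every requested flag already
+# holds and copies otherwise; 'C' plus 'F' is contradictory for ndim > 1,
+# hence the ValueError above.
+#   >>> np.require(np.arange(6).reshape(2, 3), requirements=['F']).flags['F']
+#   True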
+
+class TestBroadcast(object):
+ def test_broadcast_in_args(self):
+ # gh-5881
+ arrs = [np.empty((6, 7)), np.empty((5, 6, 1)), np.empty((7,)),
+ np.empty((5, 1, 7))]
+ mits = [np.broadcast(*arrs),
+ np.broadcast(np.broadcast(*arrs[:2]), np.broadcast(*arrs[2:])),
+ np.broadcast(arrs[0], np.broadcast(*arrs[1:-1]), arrs[-1])]
+ for mit in mits:
+ assert_equal(mit.shape, (5, 6, 7))
+ assert_equal(mit.ndim, 3)
+ assert_equal(mit.nd, 3)
+ assert_equal(mit.numiter, 4)
+ for a, ia in zip(arrs, mit.iters):
+ assert_(a is ia.base)
+
+ def test_broadcast_single_arg(self):
+ # gh-6899
+ arrs = [np.empty((5, 6, 7))]
+ mit = np.broadcast(*arrs)
+ assert_equal(mit.shape, (5, 6, 7))
+ assert_equal(mit.ndim, 3)
+ assert_equal(mit.nd, 3)
+ assert_equal(mit.numiter, 1)
+ assert_(arrs[0] is mit.iters[0].base)
+
+ def test_number_of_arguments(self):
+ arr = np.empty((5,))
+ for j in range(35):
+ arrs = [arr] * j
+ if j < 1 or j > 32:
+ assert_raises(ValueError, np.broadcast, *arrs)
+ else:
+ mit = np.broadcast(*arrs)
+ assert_equal(mit.numiter, j)
+
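+# np.broadcast accepts between 1 and 32 arrays (the bounds probed above) and
+# exposes the broadcast shape without allocating a result:
+#   >>> np.broadcast(np.empty((5, 1, 7)), np.empty((6, 1))).shape
+#   (5, 6, 7)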
+
+class TestKeepdims(object):
+
+ class sub_array(np.ndarray):
+ def sum(self, axis=None, dtype=None, out=None):
+ return np.ndarray.sum(self, axis, dtype, out, keepdims=True)
+
+ def test_raise(self):
+ sub_class = self.sub_array
+ x = np.arange(30).view(sub_class)
+ assert_raises(TypeError, np.sum, x, keepdims=True)
+
+
+class TestTensordot(object):
+
+ def test_zero_dimension(self):
+ # Test resolution to issue #5663
+        a = np.ndarray((3, 0))
+        b = np.ndarray((0, 4))
+ td = np.tensordot(a, b, (1, 0))
+ assert_array_equal(td, np.dot(a, b))
+ assert_array_equal(td, np.einsum('ij,jk', a, b))
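+
+# With a zero-length contraction axis the tensordot above is a (3, 4) array
+# of zeros, consistent with np.dot on the same empty operands:
+#   >>> np.tensordot(np.ones((3, 0)), np.ones((0, 4)), (1, 0)).shape
+#   (3, 4)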
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_numeric.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_numeric.pyc
new file mode 100644
index 0000000..ddf73ce
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_numeric.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_numerictypes.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_numerictypes.py
new file mode 100644
index 0000000..71f7b71
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_numerictypes.py
@@ -0,0 +1,499 @@
+from __future__ import division, absolute_import, print_function
+
+import sys
+import itertools
+
+import pytest
+import numpy as np
+from numpy.testing import assert_, assert_equal, assert_raises
+
+# This is the structure of the table used for plain objects:
+#
+# +-+-+-+
+# |x|y|z|
+# +-+-+-+
+
+# Structure of a plain array description:
+Pdescr = [
+ ('x', 'i4', (2,)),
+ ('y', 'f8', (2, 2)),
+ ('z', 'u1')]
+
+# A plain list of tuples with values for testing:
+PbufferT = [
+ # x y z
+ ([3, 2], [[6., 4.], [6., 4.]], 8),
+ ([4, 3], [[7., 5.], [7., 5.]], 9),
+ ]
+
+
+# This is the structure of the table used for nested objects (DON'T PANIC!):
+#
+# +-+---------------------------------+-----+----------+-+-+
+# |x|Info |color|info |y|z|
+# | +-----+--+----------------+----+--+ +----+-----+ | |
+# | |value|y2|Info2 |name|z2| |Name|Value| | |
+# | | | +----+-----+--+--+ | | | | | | |
+# | | | |name|value|y3|z3| | | | | | | |
+# +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+
+#
+
+# The corresponding nested array description:
+Ndescr = [
+ ('x', 'i4', (2,)),
+ ('Info', [
+ ('value', 'c16'),
+ ('y2', 'f8'),
+ ('Info2', [
+ ('name', 'S2'),
+ ('value', 'c16', (2,)),
+ ('y3', 'f8', (2,)),
+ ('z3', 'u4', (2,))]),
+ ('name', 'S2'),
+ ('z2', 'b1')]),
+ ('color', 'S2'),
+ ('info', [
+ ('Name', 'U8'),
+ ('Value', 'c16')]),
+ ('y', 'f8', (2, 2)),
+ ('z', 'u1')]
+
+NbufferT = [
+ # x Info color info y z
+ # value y2 Info2 name z2 Name Value
+ # name value y3 z3
+ ([3, 2], (6j, 6., (b'nn', [6j, 4j], [6., 4.], [1, 2]), b'NN', True), b'cc', (u'NN', 6j), [[6., 4.], [6., 4.]], 8),
+ ([4, 3], (7j, 7., (b'oo', [7j, 5j], [7., 5.], [2, 1]), b'OO', False), b'dd', (u'OO', 7j), [[7., 5.], [7., 5.]], 9),
+ ]
+
+
+byteorder = {'little':'<', 'big':'>'}[sys.byteorder]
+
+def normalize_descr(descr):
+ "Normalize a description adding the platform byteorder."
+
+ out = []
+ for item in descr:
+ dtype = item[1]
+ if isinstance(dtype, str):
+ if dtype[0] not in ['|', '<', '>']:
+ onebyte = dtype[1:] == "1"
+ if onebyte or dtype[0] in ['S', 'V', 'b']:
+ dtype = "|" + dtype
+ else:
+ dtype = byteorder + dtype
+ if len(item) > 2 and np.prod(item[2]) > 1:
+ nitem = (item[0], dtype, item[2])
+ else:
+ nitem = (item[0], dtype)
+ out.append(nitem)
+ elif isinstance(dtype, list):
+ l = normalize_descr(dtype)
+ out.append((item[0], l))
+ else:
+ raise ValueError("Expected a str or list and got %s" %
+ (type(item)))
+ return out
+
+
+############################################################
+# Creation tests
+############################################################
+
+class CreateZeros(object):
+ """Check the creation of heterogeneous arrays zero-valued"""
+
+ def test_zeros0D(self):
+ """Check creation of 0-dimensional objects"""
+ h = np.zeros((), dtype=self._descr)
+ assert_(normalize_descr(self._descr) == h.dtype.descr)
+ assert_(h.dtype.fields['x'][0].name[:4] == 'void')
+ assert_(h.dtype.fields['x'][0].char == 'V')
+ assert_(h.dtype.fields['x'][0].type == np.void)
+ # A small check that data is ok
+ assert_equal(h['z'], np.zeros((), dtype='u1'))
+
+ def test_zerosSD(self):
+ """Check creation of single-dimensional objects"""
+ h = np.zeros((2,), dtype=self._descr)
+ assert_(normalize_descr(self._descr) == h.dtype.descr)
+ assert_(h.dtype['y'].name[:4] == 'void')
+ assert_(h.dtype['y'].char == 'V')
+ assert_(h.dtype['y'].type == np.void)
+ # A small check that data is ok
+ assert_equal(h['z'], np.zeros((2,), dtype='u1'))
+
+ def test_zerosMD(self):
+ """Check creation of multi-dimensional objects"""
+ h = np.zeros((2, 3), dtype=self._descr)
+ assert_(normalize_descr(self._descr) == h.dtype.descr)
+ assert_(h.dtype['z'].name == 'uint8')
+ assert_(h.dtype['z'].char == 'B')
+ assert_(h.dtype['z'].type == np.uint8)
+ # A small check that data is ok
+ assert_equal(h['z'], np.zeros((2, 3), dtype='u1'))
+
+
+class TestCreateZerosPlain(CreateZeros):
+ """Check the creation of heterogeneous arrays zero-valued (plain)"""
+ _descr = Pdescr
+
+class TestCreateZerosNested(CreateZeros):
+ """Check the creation of heterogeneous arrays zero-valued (nested)"""
+ _descr = Ndescr
+
+
+class CreateValues(object):
+ """Check the creation of heterogeneous arrays with values"""
+
+ def test_tuple(self):
+ """Check creation from tuples"""
+ h = np.array(self._buffer, dtype=self._descr)
+ assert_(normalize_descr(self._descr) == h.dtype.descr)
+ if self.multiple_rows:
+ assert_(h.shape == (2,))
+ else:
+ assert_(h.shape == ())
+
+ def test_list_of_tuple(self):
+ """Check creation from list of tuples"""
+ h = np.array([self._buffer], dtype=self._descr)
+ assert_(normalize_descr(self._descr) == h.dtype.descr)
+ if self.multiple_rows:
+ assert_(h.shape == (1, 2))
+ else:
+ assert_(h.shape == (1,))
+
+ def test_list_of_list_of_tuple(self):
+ """Check creation from list of list of tuples"""
+ h = np.array([[self._buffer]], dtype=self._descr)
+ assert_(normalize_descr(self._descr) == h.dtype.descr)
+ if self.multiple_rows:
+ assert_(h.shape == (1, 1, 2))
+ else:
+ assert_(h.shape == (1, 1))
+
+
+class TestCreateValuesPlainSingle(CreateValues):
+ """Check the creation of heterogeneous arrays (plain, single row)"""
+ _descr = Pdescr
+ multiple_rows = 0
+ _buffer = PbufferT[0]
+
+class TestCreateValuesPlainMultiple(CreateValues):
+ """Check the creation of heterogeneous arrays (plain, multiple rows)"""
+ _descr = Pdescr
+ multiple_rows = 1
+ _buffer = PbufferT
+
+class TestCreateValuesNestedSingle(CreateValues):
+ """Check the creation of heterogeneous arrays (nested, single row)"""
+ _descr = Ndescr
+ multiple_rows = 0
+ _buffer = NbufferT[0]
+
+class TestCreateValuesNestedMultiple(CreateValues):
+ """Check the creation of heterogeneous arrays (nested, multiple rows)"""
+ _descr = Ndescr
+ multiple_rows = 1
+ _buffer = NbufferT
+
+
+############################################################
+# Reading tests
+############################################################
+
+class ReadValuesPlain(object):
+ """Check the reading of values in heterogeneous arrays (plain)"""
+
+ def test_access_fields(self):
+ h = np.array(self._buffer, dtype=self._descr)
+ if not self.multiple_rows:
+ assert_(h.shape == ())
+ assert_equal(h['x'], np.array(self._buffer[0], dtype='i4'))
+ assert_equal(h['y'], np.array(self._buffer[1], dtype='f8'))
+ assert_equal(h['z'], np.array(self._buffer[2], dtype='u1'))
+ else:
+ assert_(len(h) == 2)
+ assert_equal(h['x'], np.array([self._buffer[0][0],
+ self._buffer[1][0]], dtype='i4'))
+ assert_equal(h['y'], np.array([self._buffer[0][1],
+ self._buffer[1][1]], dtype='f8'))
+ assert_equal(h['z'], np.array([self._buffer[0][2],
+ self._buffer[1][2]], dtype='u1'))
+
+
+class TestReadValuesPlainSingle(ReadValuesPlain):
+ """Check the creation of heterogeneous arrays (plain, single row)"""
+ _descr = Pdescr
+ multiple_rows = 0
+ _buffer = PbufferT[0]
+
+class TestReadValuesPlainMultiple(ReadValuesPlain):
+ """Check the values of heterogeneous arrays (plain, multiple rows)"""
+ _descr = Pdescr
+ multiple_rows = 1
+ _buffer = PbufferT
+
+class ReadValuesNested(object):
+ """Check the reading of values in heterogeneous arrays (nested)"""
+
+ def test_access_top_fields(self):
+ """Check reading the top fields of a nested array"""
+ h = np.array(self._buffer, dtype=self._descr)
+ if not self.multiple_rows:
+ assert_(h.shape == ())
+ assert_equal(h['x'], np.array(self._buffer[0], dtype='i4'))
+ assert_equal(h['y'], np.array(self._buffer[4], dtype='f8'))
+ assert_equal(h['z'], np.array(self._buffer[5], dtype='u1'))
+ else:
+ assert_(len(h) == 2)
+ assert_equal(h['x'], np.array([self._buffer[0][0],
+ self._buffer[1][0]], dtype='i4'))
+ assert_equal(h['y'], np.array([self._buffer[0][4],
+ self._buffer[1][4]], dtype='f8'))
+ assert_equal(h['z'], np.array([self._buffer[0][5],
+ self._buffer[1][5]], dtype='u1'))
+
+ def test_nested1_acessors(self):
+ """Check reading the nested fields of a nested array (1st level)"""
+ h = np.array(self._buffer, dtype=self._descr)
+ if not self.multiple_rows:
+ assert_equal(h['Info']['value'],
+ np.array(self._buffer[1][0], dtype='c16'))
+ assert_equal(h['Info']['y2'],
+ np.array(self._buffer[1][1], dtype='f8'))
+ assert_equal(h['info']['Name'],
+ np.array(self._buffer[3][0], dtype='U2'))
+ assert_equal(h['info']['Value'],
+ np.array(self._buffer[3][1], dtype='c16'))
+ else:
+ assert_equal(h['Info']['value'],
+ np.array([self._buffer[0][1][0],
+ self._buffer[1][1][0]],
+ dtype='c16'))
+ assert_equal(h['Info']['y2'],
+ np.array([self._buffer[0][1][1],
+ self._buffer[1][1][1]],
+ dtype='f8'))
+ assert_equal(h['info']['Name'],
+ np.array([self._buffer[0][3][0],
+ self._buffer[1][3][0]],
+ dtype='U2'))
+ assert_equal(h['info']['Value'],
+ np.array([self._buffer[0][3][1],
+ self._buffer[1][3][1]],
+ dtype='c16'))
+
+ def test_nested2_acessors(self):
+ """Check reading the nested fields of a nested array (2nd level)"""
+ h = np.array(self._buffer, dtype=self._descr)
+ if not self.multiple_rows:
+ assert_equal(h['Info']['Info2']['value'],
+ np.array(self._buffer[1][2][1], dtype='c16'))
+ assert_equal(h['Info']['Info2']['z3'],
+ np.array(self._buffer[1][2][3], dtype='u4'))
+ else:
+ assert_equal(h['Info']['Info2']['value'],
+ np.array([self._buffer[0][1][2][1],
+ self._buffer[1][1][2][1]],
+ dtype='c16'))
+ assert_equal(h['Info']['Info2']['z3'],
+ np.array([self._buffer[0][1][2][3],
+ self._buffer[1][1][2][3]],
+ dtype='u4'))
+
+ def test_nested1_descriptor(self):
+ """Check access nested descriptors of a nested array (1st level)"""
+ h = np.array(self._buffer, dtype=self._descr)
+ assert_(h.dtype['Info']['value'].name == 'complex128')
+ assert_(h.dtype['Info']['y2'].name == 'float64')
+ if sys.version_info[0] >= 3:
+ assert_(h.dtype['info']['Name'].name == 'str256')
+ else:
+ assert_(h.dtype['info']['Name'].name == 'unicode256')
+ assert_(h.dtype['info']['Value'].name == 'complex128')
+
+ def test_nested2_descriptor(self):
+ """Check access nested descriptors of a nested array (2nd level)"""
+ h = np.array(self._buffer, dtype=self._descr)
+ assert_(h.dtype['Info']['Info2']['value'].name == 'void256')
+ assert_(h.dtype['Info']['Info2']['z3'].name == 'void64')
+
+
+class TestReadValuesNestedSingle(ReadValuesNested):
+ """Check the values of heterogeneous arrays (nested, single row)"""
+ _descr = Ndescr
+ multiple_rows = False
+ _buffer = NbufferT[0]
+
+class TestReadValuesNestedMultiple(ReadValuesNested):
+ """Check the values of heterogeneous arrays (nested, multiple rows)"""
+ _descr = Ndescr
+ multiple_rows = True
+ _buffer = NbufferT
+
+class TestEmptyField(object):
+ def test_assign(self):
+ a = np.arange(10, dtype=np.float32)
+ a.dtype = [("int", "<0i4"), ("float", "<2f4")]
+ assert_(a['int'].shape == (5, 0))
+ assert_(a['float'].shape == (5, 2))
+
+class TestCommonType(object):
+ def test_scalar_loses1(self):
+ res = np.find_common_type(['f4', 'f4', 'i2'], ['f8'])
+ assert_(res == 'f4')
+
+ def test_scalar_loses2(self):
+ res = np.find_common_type(['f4', 'f4'], ['i8'])
+ assert_(res == 'f4')
+
+ def test_scalar_wins(self):
+ res = np.find_common_type(['f4', 'f4', 'i2'], ['c8'])
+ assert_(res == 'c8')
+
+ def test_scalar_wins2(self):
+ res = np.find_common_type(['u4', 'i4', 'i4'], ['f4'])
+ assert_(res == 'f8')
+
+ def test_scalar_wins3(self): # doesn't go up to 'f16' on purpose
+ res = np.find_common_type(['u8', 'i8', 'i8'], ['f8'])
+ assert_(res == 'f8')
+
+class TestMultipleFields(object):
+ def setup(self):
+ self.ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8')
+
+ def _bad_call(self):
+ return self.ary['f0', 'f1']
+
+ def test_no_tuple(self):
+ assert_raises(IndexError, self._bad_call)
+
+ def test_return(self):
+ res = self.ary[['f0', 'f2']].tolist()
+ assert_(res == [(1, 3), (5, 7)])
+
+
+class TestIsSubDType(object):
+ # scalar types can be promoted into dtypes
+ wrappers = [np.dtype, lambda x: x]
+
+ def test_both_abstract(self):
+ assert_(np.issubdtype(np.floating, np.inexact))
+ assert_(not np.issubdtype(np.inexact, np.floating))
+
+ def test_same(self):
+ for cls in (np.float32, np.int32):
+ for w1, w2 in itertools.product(self.wrappers, repeat=2):
+ assert_(np.issubdtype(w1(cls), w2(cls)))
+
+ def test_subclass(self):
+ # note we cannot promote floating to a dtype, as it would turn into a
+ # concrete type
+ for w in self.wrappers:
+ assert_(np.issubdtype(w(np.float32), np.floating))
+ assert_(np.issubdtype(w(np.float64), np.floating))
+
+ def test_subclass_backwards(self):
+ for w in self.wrappers:
+ assert_(not np.issubdtype(np.floating, w(np.float32)))
+ assert_(not np.issubdtype(np.floating, w(np.float64)))
+
+ def test_sibling_class(self):
+ for w1, w2 in itertools.product(self.wrappers, repeat=2):
+ assert_(not np.issubdtype(w1(np.float32), w2(np.float64)))
+ assert_(not np.issubdtype(w1(np.float64), w2(np.float32)))
+
+
+class TestSctypeDict(object):
+ def test_longdouble(self):
+ assert_(np.sctypeDict['f8'] is not np.longdouble)
+ assert_(np.sctypeDict['c16'] is not np.clongdouble)
+
+
+class TestBitName(object):
+ def test_abstract(self):
+ assert_raises(ValueError, np.core.numerictypes.bitname, np.floating)
+
+
+class TestMaximumSctype(object):
+
+ # note that parametrizing with sctype['int'] and similar would skip types
+ # with the same size (gh-11923)
+
+ @pytest.mark.parametrize('t', [np.byte, np.short, np.intc, np.int_, np.longlong])
+ def test_int(self, t):
+ assert_equal(np.maximum_sctype(t), np.sctypes['int'][-1])
+
+ @pytest.mark.parametrize('t', [np.ubyte, np.ushort, np.uintc, np.uint, np.ulonglong])
+ def test_uint(self, t):
+ assert_equal(np.maximum_sctype(t), np.sctypes['uint'][-1])
+
+ @pytest.mark.parametrize('t', [np.half, np.single, np.double, np.longdouble])
+ def test_float(self, t):
+ assert_equal(np.maximum_sctype(t), np.sctypes['float'][-1])
+
+ @pytest.mark.parametrize('t', [np.csingle, np.cdouble, np.clongdouble])
+ def test_complex(self, t):
+ assert_equal(np.maximum_sctype(t), np.sctypes['complex'][-1])
+
+ @pytest.mark.parametrize('t', [np.bool_, np.object_, np.unicode_, np.bytes_, np.void])
+ def test_other(self, t):
+ assert_equal(np.maximum_sctype(t), t)
+
+
+class Test_sctype2char(object):
+ # This function is old enough that we're really just documenting the quirks
+ # at this point.
+
+ def test_scalar_type(self):
+ assert_equal(np.sctype2char(np.double), 'd')
+ assert_equal(np.sctype2char(np.int_), 'l')
+ assert_equal(np.sctype2char(np.unicode_), 'U')
+ assert_equal(np.sctype2char(np.bytes_), 'S')
+
+ def test_other_type(self):
+ assert_equal(np.sctype2char(float), 'd')
+ assert_equal(np.sctype2char(list), 'O')
+ assert_equal(np.sctype2char(np.ndarray), 'O')
+
+ def test_third_party_scalar_type(self):
+ from numpy.core._rational_tests import rational
+ assert_raises(KeyError, np.sctype2char, rational)
+ assert_raises(KeyError, np.sctype2char, rational(1))
+
+ def test_array_instance(self):
+ assert_equal(np.sctype2char(np.array([1.0, 2.0])), 'd')
+
+ def test_abstract_type(self):
+ assert_raises(KeyError, np.sctype2char, np.floating)
+
+ def test_non_type(self):
+ assert_raises(ValueError, np.sctype2char, 1)
+
+@pytest.mark.parametrize("rep, expected", [
+ (np.int32, True),
+ (list, False),
+ (1.1, False),
+ (str, True),
+ (np.dtype(np.float64), True),
+ (np.dtype((np.int16, (3, 4))), True),
+ (np.dtype([('a', np.int8)]), True),
+ ])
+def test_issctype(rep, expected):
+ # ensure proper identification of scalar
+ # data-types by issctype()
+ actual = np.issctype(rep)
+ assert_equal(actual, expected)
+
+
+@pytest.mark.skipif(sys.flags.optimize > 1,
+ reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1")
+class TestDocStrings(object):
+ def test_platform_dependent_aliases(self):
+ if np.int64 is np.int_:
+ assert_('int64' in np.int_.__doc__)
+ elif np.int64 is np.longlong:
+ assert_('int64' in np.longlong.__doc__)
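
The test_numerictypes.py file above builds plain and nested structured dtypes; a short sketch of the field-access behaviour it verifies, again illustrative rather than vendored, with dt/h as hypothetical names:

import numpy as np

# A nested record type: a 2-vector of int32 plus a sub-record.
dt = np.dtype([('x', 'i4', (2,)),
               ('Info', [('value', 'c16'), ('name', 'S2')])])

h = np.zeros((2,), dtype=dt)

# Nested descriptors are reachable by indexing the dtype with field names.
assert h.dtype['Info']['value'].name == 'complex128'

# Field views can be read and assigned at any nesting level.
h['Info']['name'] = b'ab'
assert h['Info']['name'].tolist() == [b'ab', b'ab']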
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_numerictypes.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_numerictypes.pyc
new file mode 100644
index 0000000..486c537
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_numerictypes.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_overrides.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_overrides.py
new file mode 100644
index 0000000..8f1c165
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_overrides.py
@@ -0,0 +1,392 @@
+from __future__ import division, absolute_import, print_function
+
+import inspect
+import sys
+
+import numpy as np
+from numpy.testing import (
+ assert_, assert_equal, assert_raises, assert_raises_regex)
+from numpy.core.overrides import (
+ _get_implementing_args, array_function_dispatch,
+ verify_matching_signatures, ENABLE_ARRAY_FUNCTION)
+from numpy.core.numeric import pickle
+import pytest
+
+
+requires_array_function = pytest.mark.skipif(
+ not ENABLE_ARRAY_FUNCTION,
+ reason="__array_function__ dispatch not enabled.")
+
+
+def _return_not_implemented(self, *args, **kwargs):
+ return NotImplemented
+
+
+# need to define this at the top level to test pickling
+@array_function_dispatch(lambda array: (array,))
+def dispatched_one_arg(array):
+ """Docstring."""
+ return 'original'
+
+
+@array_function_dispatch(lambda array1, array2: (array1, array2))
+def dispatched_two_arg(array1, array2):
+ """Docstring."""
+ return 'original'
+
+
+@requires_array_function
+class TestGetImplementingArgs(object):
+
+ def test_ndarray(self):
+ array = np.array(1)
+
+ args = _get_implementing_args([array])
+ assert_equal(list(args), [array])
+
+ args = _get_implementing_args([array, array])
+ assert_equal(list(args), [array])
+
+ args = _get_implementing_args([array, 1])
+ assert_equal(list(args), [array])
+
+ args = _get_implementing_args([1, array])
+ assert_equal(list(args), [array])
+
+ def test_ndarray_subclasses(self):
+
+ class OverrideSub(np.ndarray):
+ __array_function__ = _return_not_implemented
+
+ class NoOverrideSub(np.ndarray):
+ pass
+
+ array = np.array(1).view(np.ndarray)
+ override_sub = np.array(1).view(OverrideSub)
+ no_override_sub = np.array(1).view(NoOverrideSub)
+
+ args = _get_implementing_args([array, override_sub])
+ assert_equal(list(args), [override_sub, array])
+
+ args = _get_implementing_args([array, no_override_sub])
+ assert_equal(list(args), [no_override_sub, array])
+
+ args = _get_implementing_args(
+ [override_sub, no_override_sub])
+ assert_equal(list(args), [override_sub, no_override_sub])
+
+ def test_ndarray_and_duck_array(self):
+
+ class Other(object):
+ __array_function__ = _return_not_implemented
+
+ array = np.array(1)
+ other = Other()
+
+ args = _get_implementing_args([other, array])
+ assert_equal(list(args), [other, array])
+
+ args = _get_implementing_args([array, other])
+ assert_equal(list(args), [array, other])
+
+ def test_ndarray_subclass_and_duck_array(self):
+
+ class OverrideSub(np.ndarray):
+ __array_function__ = _return_not_implemented
+
+ class Other(object):
+ __array_function__ = _return_not_implemented
+
+ array = np.array(1)
+ subarray = np.array(1).view(OverrideSub)
+ other = Other()
+
+ assert_equal(_get_implementing_args([array, subarray, other]),
+ [subarray, array, other])
+ assert_equal(_get_implementing_args([array, other, subarray]),
+ [subarray, array, other])
+
+ def test_many_duck_arrays(self):
+
+ class A(object):
+ __array_function__ = _return_not_implemented
+
+ class B(A):
+ __array_function__ = _return_not_implemented
+
+ class C(A):
+ __array_function__ = _return_not_implemented
+
+ class D(object):
+ __array_function__ = _return_not_implemented
+
+ a = A()
+ b = B()
+ c = C()
+ d = D()
+
+ assert_equal(_get_implementing_args([1]), [])
+ assert_equal(_get_implementing_args([a]), [a])
+ assert_equal(_get_implementing_args([a, 1]), [a])
+ assert_equal(_get_implementing_args([a, a, a]), [a])
+ assert_equal(_get_implementing_args([a, d, a]), [a, d])
+ assert_equal(_get_implementing_args([a, b]), [b, a])
+ assert_equal(_get_implementing_args([b, a]), [b, a])
+ assert_equal(_get_implementing_args([a, b, c]), [b, c, a])
+ assert_equal(_get_implementing_args([a, c, b]), [c, b, a])
+
+ def test_too_many_duck_arrays(self):
+ namespace = dict(__array_function__=_return_not_implemented)
+ types = [type('A' + str(i), (object,), namespace) for i in range(33)]
+ relevant_args = [t() for t in types]
+
+ actual = _get_implementing_args(relevant_args[:32])
+ assert_equal(actual, relevant_args[:32])
+
+ with assert_raises_regex(TypeError, 'distinct argument types'):
+ _get_implementing_args(relevant_args)
+
+
+@requires_array_function
+class TestNDArrayArrayFunction(object):
+
+ def test_method(self):
+
+ class Other(object):
+ __array_function__ = _return_not_implemented
+
+ class NoOverrideSub(np.ndarray):
+ pass
+
+ class OverrideSub(np.ndarray):
+ __array_function__ = _return_not_implemented
+
+ array = np.array([1])
+ other = Other()
+ no_override_sub = array.view(NoOverrideSub)
+ override_sub = array.view(OverrideSub)
+
+ result = array.__array_function__(func=dispatched_two_arg,
+ types=(np.ndarray,),
+ args=(array, 1.), kwargs={})
+ assert_equal(result, 'original')
+
+ result = array.__array_function__(func=dispatched_two_arg,
+ types=(np.ndarray, Other),
+ args=(array, other), kwargs={})
+ assert_(result is NotImplemented)
+
+ result = array.__array_function__(func=dispatched_two_arg,
+ types=(np.ndarray, NoOverrideSub),
+ args=(array, no_override_sub),
+ kwargs={})
+ assert_equal(result, 'original')
+
+ result = array.__array_function__(func=dispatched_two_arg,
+ types=(np.ndarray, OverrideSub),
+ args=(array, override_sub),
+ kwargs={})
+ assert_equal(result, 'original')
+
+ with assert_raises_regex(TypeError, 'no implementation found'):
+ np.concatenate((array, other))
+
+ expected = np.concatenate((array, array))
+ result = np.concatenate((array, no_override_sub))
+ assert_equal(result, expected.view(NoOverrideSub))
+ result = np.concatenate((array, override_sub))
+ assert_equal(result, expected.view(OverrideSub))
+
+ def test_no_wrapper(self):
+ array = np.array(1)
+ func = dispatched_one_arg.__wrapped__
+ with assert_raises_regex(AttributeError, '__wrapped__'):
+ array.__array_function__(func=func,
+ types=(np.ndarray,),
+ args=(array,), kwargs={})
+
+
+@requires_array_function
+class TestArrayFunctionDispatch(object):
+
+ def test_pickle(self):
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ roundtripped = pickle.loads(
+ pickle.dumps(dispatched_one_arg, protocol=proto))
+ assert_(roundtripped is dispatched_one_arg)
+
+ def test_name_and_docstring(self):
+ assert_equal(dispatched_one_arg.__name__, 'dispatched_one_arg')
+ if sys.flags.optimize < 2:
+ assert_equal(dispatched_one_arg.__doc__, 'Docstring.')
+
+ def test_interface(self):
+
+ class MyArray(object):
+ def __array_function__(self, func, types, args, kwargs):
+ return (self, func, types, args, kwargs)
+
+ original = MyArray()
+ (obj, func, types, args, kwargs) = dispatched_one_arg(original)
+ assert_(obj is original)
+ assert_(func is dispatched_one_arg)
+ assert_equal(set(types), {MyArray})
+ # assert_equal uses the overloaded np.iscomplexobj() internally
+ assert_(args == (original,))
+ assert_equal(kwargs, {})
+
+ def test_not_implemented(self):
+
+ class MyArray(object):
+ def __array_function__(self, func, types, args, kwargs):
+ return NotImplemented
+
+ array = MyArray()
+ with assert_raises_regex(TypeError, 'no implementation found'):
+ dispatched_one_arg(array)
+
+
+@requires_array_function
+class TestVerifyMatchingSignatures(object):
+
+ def test_verify_matching_signatures(self):
+
+ verify_matching_signatures(lambda x: 0, lambda x: 0)
+ verify_matching_signatures(lambda x=None: 0, lambda x=None: 0)
+ verify_matching_signatures(lambda x=1: 0, lambda x=None: 0)
+
+ with assert_raises(RuntimeError):
+ verify_matching_signatures(lambda a: 0, lambda b: 0)
+ with assert_raises(RuntimeError):
+ verify_matching_signatures(lambda x: 0, lambda x=None: 0)
+ with assert_raises(RuntimeError):
+ verify_matching_signatures(lambda x=None: 0, lambda y=None: 0)
+ with assert_raises(RuntimeError):
+ verify_matching_signatures(lambda x=1: 0, lambda y=1: 0)
+
+ def test_array_function_dispatch(self):
+
+ with assert_raises(RuntimeError):
+ @array_function_dispatch(lambda x: (x,))
+ def f(y):
+ pass
+
+ # should not raise
+ @array_function_dispatch(lambda x: (x,), verify=False)
+ def f(y):
+ pass
+
+
+def _new_duck_type_and_implements():
+ """Create a duck array type and implements functions."""
+ HANDLED_FUNCTIONS = {}
+
+ class MyArray(object):
+ def __array_function__(self, func, types, args, kwargs):
+ if func not in HANDLED_FUNCTIONS:
+ return NotImplemented
+ if not all(issubclass(t, MyArray) for t in types):
+ return NotImplemented
+ return HANDLED_FUNCTIONS[func](*args, **kwargs)
+
+ def implements(numpy_function):
+ """Register an __array_function__ implementations."""
+ def decorator(func):
+ HANDLED_FUNCTIONS[numpy_function] = func
+ return func
+ return decorator
+
+ return (MyArray, implements)
+
+
+@requires_array_function
+class TestArrayFunctionImplementation(object):
+
+ def test_one_arg(self):
+ MyArray, implements = _new_duck_type_and_implements()
+
+ @implements(dispatched_one_arg)
+ def _(array):
+ return 'myarray'
+
+ assert_equal(dispatched_one_arg(1), 'original')
+ assert_equal(dispatched_one_arg(MyArray()), 'myarray')
+
+ def test_optional_args(self):
+ MyArray, implements = _new_duck_type_and_implements()
+
+ @array_function_dispatch(lambda array, option=None: (array,))
+ def func_with_option(array, option='default'):
+ return option
+
+ @implements(func_with_option)
+ def my_array_func_with_option(array, new_option='myarray'):
+ return new_option
+
+ # we don't need to implement every option on __array_function__
+ # implementations
+ assert_equal(func_with_option(1), 'default')
+ assert_equal(func_with_option(1, option='extra'), 'extra')
+ assert_equal(func_with_option(MyArray()), 'myarray')
+ with assert_raises(TypeError):
+ func_with_option(MyArray(), option='extra')
+
+ # but new options on implementations can't be used
+ result = my_array_func_with_option(MyArray(), new_option='yes')
+ assert_equal(result, 'yes')
+ with assert_raises(TypeError):
+ func_with_option(MyArray(), new_option='no')
+
+ def test_not_implemented(self):
+ MyArray, implements = _new_duck_type_and_implements()
+
+ @array_function_dispatch(lambda array: (array,), module='my')
+ def func(array):
+ return array
+
+ array = np.array(1)
+ assert_(func(array) is array)
+ assert_equal(func.__module__, 'my')
+
+ with assert_raises_regex(
+ TypeError, "no implementation found for 'my.func'"):
+ func(MyArray())
+
+
+class TestNDArrayMethods(object):
+
+ def test_repr(self):
+ # gh-12162: should still be defined even if __array_function__ doesn't
+ # implement np.array_repr()
+
+ class MyArray(np.ndarray):
+ def __array_function__(*args, **kwargs):
+ return NotImplemented
+
+ array = np.array(1).view(MyArray)
+ assert_equal(repr(array), 'MyArray(1)')
+ assert_equal(str(array), '1')
+
+
+class TestNumPyFunctions(object):
+
+ def test_set_module(self):
+ assert_equal(np.sum.__module__, 'numpy')
+ assert_equal(np.char.equal.__module__, 'numpy.char')
+ assert_equal(np.fft.fft.__module__, 'numpy.fft')
+ assert_equal(np.linalg.solve.__module__, 'numpy.linalg')
+
+ @pytest.mark.skipif(sys.version_info[0] < 3, reason="Python 3 only")
+ def test_inspect_sum(self):
+ signature = inspect.signature(np.sum)
+ assert_('axis' in signature.parameters)
+
+ @requires_array_function
+ def test_override_sum(self):
+ MyArray, implements = _new_duck_type_and_implements()
+
+ @implements(np.sum)
+ def _(array):
+ return 'yes'
+
+ assert_equal(np.sum(MyArray()), 'yes')
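
test_overrides.py above exercises NumPy's __array_function__ protocol; here is a compact sketch of the protocol as the tests use it. It assumes a NumPy build where the dispatch is active (1.17+, or the vendored 1.16 with NUMPY_EXPERIMENTAL_ARRAY_FUNCTION=1); Duck, HANDLED, and implements are illustrative names, not from the diff:

import numpy as np

HANDLED = {}

def implements(np_func):
    # Register an override for one NumPy function.
    def decorator(func):
        HANDLED[np_func] = func
        return func
    return decorator

class Duck(object):
    # NumPy consults this method before executing np.* on unknown types.
    def __array_function__(self, func, types, args, kwargs):
        if func not in HANDLED:
            return NotImplemented  # let other args try, else TypeError
        return HANDLED[func](*args, **kwargs)

@implements(np.sum)
def duck_sum(arr):
    return 'duck-sum'

assert np.sum(Duck()) == 'duck-sum'  # dispatched to the override
assert np.sum(np.arange(3)) == 3     # plain ndarrays are unaffected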
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_overrides.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_overrides.pyc
new file mode 100644
index 0000000..7140b53
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_overrides.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_print.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_print.py
new file mode 100644
index 0000000..c5c091e
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_print.py
@@ -0,0 +1,205 @@
+from __future__ import division, absolute_import, print_function
+
+import sys
+
+import pytest
+
+import numpy as np
+from numpy.testing import assert_, assert_equal
+from numpy.core.tests._locales import CommaDecimalPointLocale
+
+
+if sys.version_info[0] >= 3:
+ from io import StringIO
+else:
+ from StringIO import StringIO
+
+_REF = {np.inf: 'inf', -np.inf: '-inf', np.nan: 'nan'}
+
+
+@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble])
+def test_float_types(tp):
+ """ Check formatting.
+
+ This is only for the str function, and only for simple types.
+ The precision of np.float32 and np.longdouble aren't the same as the
+ python float precision.
+
+ """
+ for x in [0, 1, -1, 1e20]:
+ assert_equal(str(tp(x)), str(float(x)),
+ err_msg='Failed str formatting for type %s' % tp)
+
+ if tp(1e16).itemsize > 4:
+ assert_equal(str(tp(1e16)), str(float('1e16')),
+ err_msg='Failed str formatting for type %s' % tp)
+ else:
+ ref = '1e+16'
+ assert_equal(str(tp(1e16)), ref,
+ err_msg='Failed str formatting for type %s' % tp)
+
+
+@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble])
+def test_nan_inf_float(tp):
+ """ Check formatting of nan & inf.
+
+ This is only for the str function, and only for simple types.
+ The precision of np.float32 and np.longdouble aren't the same as the
+ python float precision.
+
+ """
+ for x in [np.inf, -np.inf, np.nan]:
+ assert_equal(str(tp(x)), _REF[x],
+ err_msg='Failed str formatting for type %s' % tp)
+
+
+@pytest.mark.parametrize('tp', [np.complex64, np.cdouble, np.clongdouble])
+def test_complex_types(tp):
+ """Check formatting of complex types.
+
+ This is only for the str function, and only for simple types.
+ The precision of np.float32 and np.longdouble aren't the same as the
+ python float precision.
+
+ """
+ for x in [0, 1, -1, 1e20]:
+ assert_equal(str(tp(x)), str(complex(x)),
+ err_msg='Failed str formatting for type %s' % tp)
+ assert_equal(str(tp(x*1j)), str(complex(x*1j)),
+ err_msg='Failed str formatting for type %s' % tp)
+ assert_equal(str(tp(x + x*1j)), str(complex(x + x*1j)),
+ err_msg='Failed str formatting for type %s' % tp)
+
+ if tp(1e16).itemsize > 8:
+ assert_equal(str(tp(1e16)), str(complex(1e16)),
+ err_msg='Failed str formatting for type %s' % tp)
+ else:
+ ref = '(1e+16+0j)'
+ assert_equal(str(tp(1e16)), ref,
+ err_msg='Failed str formatting for type %s' % tp)
+
+
+@pytest.mark.parametrize('dtype', [np.complex64, np.cdouble, np.clongdouble])
+def test_complex_inf_nan(dtype):
+ """Check inf/nan formatting of complex types."""
+ TESTS = {
+ complex(np.inf, 0): "(inf+0j)",
+ complex(0, np.inf): "infj",
+ complex(-np.inf, 0): "(-inf+0j)",
+ complex(0, -np.inf): "-infj",
+ complex(np.inf, 1): "(inf+1j)",
+ complex(1, np.inf): "(1+infj)",
+ complex(-np.inf, 1): "(-inf+1j)",
+ complex(1, -np.inf): "(1-infj)",
+ complex(np.nan, 0): "(nan+0j)",
+ complex(0, np.nan): "nanj",
+ complex(-np.nan, 0): "(nan+0j)",
+ complex(0, -np.nan): "nanj",
+ complex(np.nan, 1): "(nan+1j)",
+ complex(1, np.nan): "(1+nanj)",
+ complex(-np.nan, 1): "(nan+1j)",
+ complex(1, -np.nan): "(1+nanj)",
+ }
+ for c, s in TESTS.items():
+ assert_equal(str(dtype(c)), s)
+
+
+# print tests
+def _test_redirected_print(x, tp, ref=None):
+ file = StringIO()
+ file_tp = StringIO()
+ stdout = sys.stdout
+ try:
+ sys.stdout = file_tp
+ print(tp(x))
+ sys.stdout = file
+ if ref:
+ print(ref)
+ else:
+ print(x)
+ finally:
+ sys.stdout = stdout
+
+ assert_equal(file.getvalue(), file_tp.getvalue(),
+ err_msg='print failed for type %s' % tp)
+
+
+@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble])
+def test_float_type_print(tp):
+ """Check formatting when using print """
+ for x in [0, 1, -1, 1e20]:
+ _test_redirected_print(float(x), tp)
+
+ for x in [np.inf, -np.inf, np.nan]:
+ _test_redirected_print(float(x), tp, _REF[x])
+
+ if tp(1e16).itemsize > 4:
+ _test_redirected_print(float(1e16), tp)
+ else:
+ ref = '1e+16'
+ _test_redirected_print(float(1e16), tp, ref)
+
+
+@pytest.mark.parametrize('tp', [np.complex64, np.cdouble, np.clongdouble])
+def test_complex_type_print(tp):
+ """Check formatting when using print """
+ # We do not create complex with inf/nan directly because the feature is
+ # missing in python < 2.6
+ for x in [0, 1, -1, 1e20]:
+ _test_redirected_print(complex(x), tp)
+
+ if tp(1e16).itemsize > 8:
+ _test_redirected_print(complex(1e16), tp)
+ else:
+ ref = '(1e+16+0j)'
+ _test_redirected_print(complex(1e16), tp, ref)
+
+ _test_redirected_print(complex(np.inf, 1), tp, '(inf+1j)')
+ _test_redirected_print(complex(-np.inf, 1), tp, '(-inf+1j)')
+ _test_redirected_print(complex(-np.nan, 1), tp, '(nan+1j)')
+
+
+def test_scalar_format():
+ """Test the str.format method with NumPy scalar types"""
+ tests = [('{0}', True, np.bool_),
+ ('{0}', False, np.bool_),
+ ('{0:d}', 130, np.uint8),
+ ('{0:d}', 50000, np.uint16),
+ ('{0:d}', 3000000000, np.uint32),
+ ('{0:d}', 15000000000000000000, np.uint64),
+ ('{0:d}', -120, np.int8),
+ ('{0:d}', -30000, np.int16),
+ ('{0:d}', -2000000000, np.int32),
+ ('{0:d}', -7000000000000000000, np.int64),
+ ('{0:g}', 1.5, np.float16),
+ ('{0:g}', 1.5, np.float32),
+ ('{0:g}', 1.5, np.float64),
+ ('{0:g}', 1.5, np.longdouble),
+ ('{0:g}', 1.5+0.5j, np.complex64),
+ ('{0:g}', 1.5+0.5j, np.complex128),
+ ('{0:g}', 1.5+0.5j, np.clongdouble)]
+
+ for (fmat, val, valtype) in tests:
+ try:
+ assert_equal(fmat.format(val), fmat.format(valtype(val)),
+ "failed with val %s, type %s" % (val, valtype))
+ except ValueError as e:
+ assert_(False,
+ "format raised exception (fmt='%s', val=%s, type=%s, exc='%s')" %
+ (fmat, repr(val), repr(valtype), str(e)))
+
+
+#
+# Locale tests: scalar types formatting should be independent of the locale
+#
+
+class TestCommaDecimalPointLocale(CommaDecimalPointLocale):
+
+ def test_locale_single(self):
+ assert_equal(str(np.float32(1.2)), str(float(1.2)))
+
+ def test_locale_double(self):
+ assert_equal(str(np.double(1.2)), str(float(1.2)))
+
+ def test_locale_longdouble(self):
+ assert_equal(str(np.longdouble('1.2')), str(float(1.2)))
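
test_print.py above pins str()/format() parity between NumPy scalars and the Python builtins; the core invariant fits in a few lines. A sketch, hedged in that the exact printed forms can vary across NumPy versions:

import numpy as np

# str() of simple float scalars should match the builtin float...
assert str(np.float64(1e16)) == str(float(1e16))
assert str(np.float64(np.inf)) == 'inf'

# ...and str.format should agree between builtin and NumPy scalars.
assert '{0:g}'.format(np.float32(1.5)) == '{0:g}'.format(1.5)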
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_print.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_print.pyc
new file mode 100644
index 0000000..8249c52
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_print.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_records.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_records.py
new file mode 100644
index 0000000..c059ef5
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_records.py
@@ -0,0 +1,452 @@
+from __future__ import division, absolute_import, print_function
+
+import sys
+try:
+ # Accessing collections abstract classes from collections
+ # has been deprecated since Python 3.3
+ import collections.abc as collections_abc
+except ImportError:
+ import collections as collections_abc
+import textwrap
+from os import path
+import pytest
+
+import numpy as np
+from numpy.compat import Path
+from numpy.testing import (
+ assert_, assert_equal, assert_array_equal, assert_array_almost_equal,
+ assert_raises, temppath
+ )
+from numpy.core.numeric import pickle
+
+
+class TestFromrecords(object):
+ def test_fromrecords(self):
+ r = np.rec.fromrecords([[456, 'dbe', 1.2], [2, 'de', 1.3]],
+ names='col1,col2,col3')
+ assert_equal(r[0].item(), (456, 'dbe', 1.2))
+ assert_equal(r['col1'].dtype.kind, 'i')
+ if sys.version_info[0] >= 3:
+ assert_equal(r['col2'].dtype.kind, 'U')
+ assert_equal(r['col2'].dtype.itemsize, 12)
+ else:
+ assert_equal(r['col2'].dtype.kind, 'S')
+ assert_equal(r['col2'].dtype.itemsize, 3)
+ assert_equal(r['col3'].dtype.kind, 'f')
+
+ def test_fromrecords_0len(self):
+ """ Verify fromrecords works with a 0-length input """
+ dtype = [('a', float), ('b', float)]
+ r = np.rec.fromrecords([], dtype=dtype)
+ assert_equal(r.shape, (0,))
+
+ def test_fromrecords_2d(self):
+ data = [
+ [(1, 2), (3, 4), (5, 6)],
+ [(6, 5), (4, 3), (2, 1)]
+ ]
+ expected_a = [[1, 3, 5], [6, 4, 2]]
+ expected_b = [[2, 4, 6], [5, 3, 1]]
+
+ # try with dtype
+ r1 = np.rec.fromrecords(data, dtype=[('a', int), ('b', int)])
+ assert_equal(r1['a'], expected_a)
+ assert_equal(r1['b'], expected_b)
+
+ # try with names
+ r2 = np.rec.fromrecords(data, names=['a', 'b'])
+ assert_equal(r2['a'], expected_a)
+ assert_equal(r2['b'], expected_b)
+
+ assert_equal(r1, r2)
+
+ def test_method_array(self):
+ r = np.rec.array(b'abcdefg' * 100, formats='i2,a3,i4', shape=3, byteorder='big')
+ assert_equal(r[1].item(), (25444, b'efg', 1633837924))
+
+ def test_method_array2(self):
+ r = np.rec.array([(1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), (5, 55, 'ex'),
+ (6, 66, 'f'), (7, 77, 'g')], formats='u1,f4,a1')
+ assert_equal(r[1].item(), (2, 22.0, b'b'))
+
+ def test_recarray_slices(self):
+ r = np.rec.array([(1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), (5, 55, 'ex'),
+ (6, 66, 'f'), (7, 77, 'g')], formats='u1,f4,a1')
+ assert_equal(r[1::2][1].item(), (4, 44.0, b'd'))
+
+ def test_recarray_fromarrays(self):
+ x1 = np.array([1, 2, 3, 4])
+ x2 = np.array(['a', 'dd', 'xyz', '12'])
+ x3 = np.array([1.1, 2, 3, 4])
+ r = np.rec.fromarrays([x1, x2, x3], names='a,b,c')
+ assert_equal(r[1].item(), (2, 'dd', 2.0))
+ x1[1] = 34
+ assert_equal(r.a, np.array([1, 2, 3, 4]))
+
+ def test_recarray_fromfile(self):
+ data_dir = path.join(path.dirname(__file__), 'data')
+ filename = path.join(data_dir, 'recarray_from_file.fits')
+ fd = open(filename, 'rb')
+ fd.seek(2880 * 2)
+ r1 = np.rec.fromfile(fd, formats='f8,i4,a5', shape=3, byteorder='big')
+ fd.seek(2880 * 2)
+ r2 = np.rec.array(fd, formats='f8,i4,a5', shape=3, byteorder='big')
+ fd.close()
+ assert_equal(r1, r2)
+
+ def test_recarray_from_obj(self):
+ count = 10
+ a = np.zeros(count, dtype='O')
+ b = np.zeros(count, dtype='f8')
+ c = np.zeros(count, dtype='f8')
+ for i in range(len(a)):
+ a[i] = list(range(1, 10))
+
+ mine = np.rec.fromarrays([a, b, c], names='date,data1,data2')
+ for i in range(len(a)):
+ assert_((mine.date[i] == list(range(1, 10))))
+ assert_((mine.data1[i] == 0.0))
+ assert_((mine.data2[i] == 0.0))
+
+ def test_recarray_repr(self):
+ a = np.array([(1, 0.1), (2, 0.2)],
+ dtype=[('foo', '<i4'), ('bar', '<f8')])
+
+ def test_endian_bool_indexing(self):
+ # Ticket #105
+ a = np.arange(10., dtype='>f8')
+ b = np.arange(10., dtype='<f8')
+ xa = np.where((a > 2) & (a < 6))
+ xb = np.where((b > 2) & (b < 6))
+ ya = ((a > 2) & (a < 6))
+ yb = ((b > 2) & (b < 6))
+ assert_array_almost_equal(xa, ya.nonzero())
+ assert_array_almost_equal(xb, yb.nonzero())
+ assert_(np.all(a[ya] > 0.5))
+ assert_(np.all(b[yb] > 0.5))
+
+ def test_endian_where(self):
+ # GitHub issue #369
+ net = np.zeros(3, dtype='>f4')
+ net[1] = 0.00458849
+ net[2] = 0.605202
+ max_net = net.max()
+ test = np.where(net <= 0., max_net, net)
+ correct = np.array([ 0.60520202, 0.00458849, 0.60520202])
+ assert_array_almost_equal(test, correct)
+
+ def test_endian_recarray(self):
+ # Ticket #2185
+ dt = np.dtype([
+ ('head', '>u4'),
+ ('data', '>u4', 2),
+ ])
+ buf = np.recarray(1, dtype=dt)
+ buf[0]['head'] = 1
+ buf[0]['data'][:] = [1, 1]
+
+ h = buf[0]['head']
+ d = buf[0]['data'][0]
+ buf[0]['head'] = h
+ buf[0]['data'][0] = d
+ assert_(buf[0]['head'] == 1)
+
+ def test_mem_dot(self):
+ # Ticket #106
+ x = np.random.randn(0, 1)
+ y = np.random.randn(10, 1)
+ # Dummy array to detect bad memory access:
+ _z = np.ones(10)
+ _dummy = np.empty((0, 10))
+ z = np.lib.stride_tricks.as_strided(_z, _dummy.shape, _dummy.strides)
+ np.dot(x, np.transpose(y), out=z)
+ assert_equal(_z, np.ones(10))
+ # Do the same for the built-in dot:
+ np.core.multiarray.dot(x, np.transpose(y), out=z)
+ assert_equal(_z, np.ones(10))
+
+ def test_arange_endian(self):
+ # Ticket #111
+ ref = np.arange(10)
+ x = np.arange(10, dtype='<f8')
+ assert_array_equal(ref, x)
+
+ if sys.version_info[:2] >= (3, 4):
+ # encoding='bytes' was added in Py3.4
+ for original, data in test_data:
+ result = pickle.loads(data, encoding='bytes')
+ assert_equal(result, original)
+
+ if isinstance(result, np.ndarray) and result.dtype.names:
+ for name in result.dtype.names:
+ assert_(isinstance(name, str))
+
+ def test_pickle_dtype(self):
+ # Ticket #251
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ pickle.dumps(float, protocol=proto)
+
+ def test_swap_real(self):
+ # Ticket #265
+ assert_equal(np.arange(4, dtype='>c8').imag.max(), 0.0)
+ assert_equal(np.arange(4, dtype='<c8').imag.max(), 0.0)
+
+ def test_void_copyswap(self):
+ dt = np.dtype([('one', '<i4'), ('two', '<i4')])
+ x = np.array((1, 2), dtype=dt)
+ x = x.byteswap()
+ assert_(x['one'] > 1 and x['two'] > 2)
+
+ def test_method_args(self):
+ # Make sure methods and functions have same default axis
+ # keyword and arguments
+ funcs1 = ['argmax', 'argmin', 'sum', ('product', 'prod'),
+ ('sometrue', 'any'),
+ ('alltrue', 'all'), 'cumsum', ('cumproduct', 'cumprod'),
+ 'ptp', 'cumprod', 'prod', 'std', 'var', 'mean',
+ 'round', 'min', 'max', 'argsort', 'sort']
+ funcs2 = ['compress', 'take', 'repeat']
+
+ for func in funcs1:
+ arr = np.random.rand(8, 7)
+ arr2 = arr.copy()
+ if isinstance(func, tuple):
+ func_meth = func[1]
+ func = func[0]
+ else:
+ func_meth = func
+ res1 = getattr(arr, func_meth)()
+ res2 = getattr(np, func)(arr2)
+ if res1 is None:
+ res1 = arr
+
+ if res1.dtype.kind in 'uib':
+ assert_((res1 == res2).all(), func)
+ else:
+ assert_(abs(res1-res2).max() < 1e-8, func)
+
+ for func in funcs2:
+ arr1 = np.random.rand(8, 7)
+ arr2 = np.random.rand(8, 7)
+ res1 = None
+ if func == 'compress':
+ arr1 = arr1.ravel()
+ res1 = getattr(arr2, func)(arr1)
+ else:
+ arr2 = (15*arr2).astype(int).ravel()
+ if res1 is None:
+ res1 = getattr(arr1, func)(arr2)
+ res2 = getattr(np, func)(arr1, arr2)
+ assert_(abs(res1-res2).max() < 1e-8, func)
+
+ def test_mem_lexsort_strings(self):
+ # Ticket #298
+ lst = ['abc', 'cde', 'fgh']
+ np.lexsort((lst,))
+
+ def test_fancy_index(self):
+ # Ticket #302
+ x = np.array([1, 2])[np.array([0])]
+ assert_equal(x.shape, (1,))
+
+ def test_recarray_copy(self):
+ # Ticket #312
+ dt = [('x', np.int16), ('y', np.float64)]
+ ra = np.array([(1, 2.3)], dtype=dt)
+ rb = np.rec.array(ra, dtype=dt)
+ rb['x'] = 2.
+ assert_(ra['x'] != rb['x'])
+
+ def test_rec_fromarray(self):
+ # Ticket #322
+ x1 = np.array([[1, 2], [3, 4], [5, 6]])
+ x2 = np.array(['a', 'dd', 'xyz'])
+ x3 = np.array([1.1, 2, 3])
+ np.rec.fromarrays([x1, x2, x3], formats="(2,)i4,a3,f8")
+
+ def test_object_array_assign(self):
+ x = np.empty((2, 2), object)
+ x.flat[2] = (1, 2, 3)
+ assert_equal(x.flat[2], (1, 2, 3))
+
+ def test_ndmin_float64(self):
+ # Ticket #324
+ x = np.array([1, 2, 3], dtype=np.float64)
+ assert_equal(np.array(x, dtype=np.float32, ndmin=2).ndim, 2)
+ assert_equal(np.array(x, dtype=np.float64, ndmin=2).ndim, 2)
+
+ def test_ndmin_order(self):
+ # Issue #465 and related checks
+ assert_(np.array([1, 2], order='C', ndmin=3).flags.c_contiguous)
+ assert_(np.array([1, 2], order='F', ndmin=3).flags.f_contiguous)
+ assert_(np.array(np.ones((2, 2), order='F'), ndmin=3).flags.f_contiguous)
+ assert_(np.array(np.ones((2, 2), order='C'), ndmin=3).flags.c_contiguous)
+
+ def test_mem_axis_minimization(self):
+ # Ticket #327
+ data = np.arange(5)
+ data = np.add.outer(data, data)
+
+ def test_mem_float_imag(self):
+ # Ticket #330
+ np.float64(1.0).imag
+
+ def test_dtype_tuple(self):
+ # Ticket #334
+ assert_(np.dtype('i4') == np.dtype(('i4', ())))
+
+ def test_dtype_posttuple(self):
+ # Ticket #335
+ np.dtype([('col1', '()i4')])
+
+ def test_numeric_carray_compare(self):
+ # Ticket #341
+ assert_equal(np.array(['X'], 'c'), b'X')
+
+ def test_string_array_size(self):
+ # Ticket #342
+ assert_raises(ValueError,
+ np.array, [['X'], ['X', 'X', 'X']], '|S1')
+
+ def test_dtype_repr(self):
+ # Ticket #344
+ dt1 = np.dtype(('uint32', 2))
+ dt2 = np.dtype(('uint32', (2,)))
+ assert_equal(dt1.__repr__(), dt2.__repr__())
+
+ def test_reshape_order(self):
+ # Make sure reshape order works.
+ a = np.arange(6).reshape(2, 3, order='F')
+ assert_equal(a, [[0, 2, 4], [1, 3, 5]])
+ a = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
+ b = a[:, 1]
+ assert_equal(b.reshape(2, 2, order='F'), [[2, 6], [4, 8]])
+
+ def test_reshape_zero_strides(self):
+ # Issue #380, test reshaping of zero strided arrays
+ a = np.ones(1)
+ a = np.lib.stride_tricks.as_strided(a, shape=(5,), strides=(0,))
+ assert_(a.reshape(5, 1).strides[0] == 0)
+
+ def test_reshape_zero_size(self):
+ # GitHub Issue #2700, setting shape failed for 0-sized arrays
+ a = np.ones((0, 2))
+ a.shape = (-1, 2)
+
+ # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides.
+ # With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous.
+ @pytest.mark.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max,
+ reason="Using relaxed stride checking")
+ def test_reshape_trailing_ones_strides(self):
+ # GitHub issue gh-2949, bad strides for trailing ones of new shape
+ a = np.zeros(12, dtype=np.int32)[::2] # not contiguous
+ strides_c = (16, 8, 8, 8)
+ strides_f = (8, 24, 48, 48)
+ assert_equal(a.reshape(3, 2, 1, 1).strides, strides_c)
+ assert_equal(a.reshape(3, 2, 1, 1, order='F').strides, strides_f)
+ assert_equal(np.array(0, dtype=np.int32).reshape(1, 1).strides, (4, 4))
+
+ def test_repeat_discont(self):
+ # Ticket #352
+ a = np.arange(12).reshape(4, 3)[:, 2]
+ assert_equal(a.repeat(3), [2, 2, 2, 5, 5, 5, 8, 8, 8, 11, 11, 11])
+
+ def test_array_index(self):
+ # Make sure optimization is not called in this case.
+ a = np.array([1, 2, 3])
+ a2 = np.array([[1, 2, 3]])
+ assert_equal(a[np.where(a == 3)], a2[np.where(a2 == 3)])
+
+ def test_object_argmax(self):
+ a = np.array([1, 2, 3], dtype=object)
+ assert_(a.argmax() == 2)
+
+ def test_recarray_fields(self):
+ # Ticket #372
+ dt0 = np.dtype([('f0', 'i4'), ('f1', 'i4')])
+ dt1 = np.dtype([('f0', 'i8'), ('f1', 'i8')])
+ for a in [np.array([(1, 2), (3, 4)], "i4,i4"),
+ np.rec.array([(1, 2), (3, 4)], "i4,i4"),
+ np.rec.array([(1, 2), (3, 4)]),
+ np.rec.fromarrays([(1, 2), (3, 4)], "i4,i4"),
+ np.rec.fromarrays([(1, 2), (3, 4)])]:
+ assert_(a.dtype in [dt0, dt1])
+
+ def test_random_shuffle(self):
+ # Ticket #374
+ a = np.arange(5).reshape((5, 1))
+ b = a.copy()
+ np.random.shuffle(b)
+ assert_equal(np.sort(b, axis=0), a)
+
+ def test_refcount_vdot(self):
+ # Changeset #3443
+ _assert_valid_refcount(np.vdot)
+
+ def test_startswith(self):
+ ca = np.char.array(['Hi', 'There'])
+ assert_equal(ca.startswith('H'), [True, False])
+
+ def test_noncommutative_reduce_accumulate(self):
+ # Ticket #413
+ tosubtract = np.arange(5)
+ todivide = np.array([2.0, 0.5, 0.25])
+ assert_equal(np.subtract.reduce(tosubtract), -10)
+ assert_equal(np.divide.reduce(todivide), 16.0)
+ assert_array_equal(np.subtract.accumulate(tosubtract),
+ np.array([0, -1, -3, -6, -10]))
+ assert_array_equal(np.divide.accumulate(todivide),
+ np.array([2., 4., 16.]))
+
+ def test_convolve_empty(self):
+ # Convolve should raise an error for empty input array.
+ assert_raises(ValueError, np.convolve, [], [1])
+ assert_raises(ValueError, np.convolve, [1], [])
+
+ def test_multidim_byteswap(self):
+ # Ticket #449
+ r = np.array([(1, (0, 1, 2))], dtype="i2,3i2")
+ assert_array_equal(r.byteswap(),
+ np.array([(256, (0, 256, 512))], r.dtype))
+
+ def test_string_NULL(self):
+ # Changeset 3557
+ assert_equal(np.array("a\x00\x0b\x0c\x00").item(),
+ 'a\x00\x0b\x0c')
+
+ def test_junk_in_string_fields_of_recarray(self):
+ # Ticket #483
+ r = np.array([[b'abc']], dtype=[('var1', '|S20')])
+ assert_(asbytes(r['var1'][0][0]) == b'abc')
+
+ def test_take_output(self):
+ # Ensure that 'take' honours output parameter.
+ x = np.arange(12).reshape((3, 4))
+ a = np.take(x, [0, 2], axis=1)
+ b = np.zeros_like(a)
+ np.take(x, [0, 2], axis=1, out=b)
+ assert_array_equal(a, b)
+
+ def test_take_object_fail(self):
+ # Issue gh-3001
+ d = 123.
+ a = np.array([d, 1], dtype=object)
+ if HAS_REFCOUNT:
+ ref_d = sys.getrefcount(d)
+ try:
+ a.take([0, 100])
+ except IndexError:
+ pass
+ if HAS_REFCOUNT:
+ assert_(ref_d == sys.getrefcount(d))
+
+ def test_array_str_64bit(self):
+ # Ticket #501
+ s = np.array([1, np.nan], dtype=np.float64)
+ with np.errstate(all='raise'):
+ np.array_str(s) # Should succeed
+
+ def test_frompyfunc_endian(self):
+ # Ticket #503
+ from math import radians
+ uradians = np.frompyfunc(radians, 1, 1)
+ big_endian = np.array([83.4, 83.5], dtype='>f8')
+ little_endian = np.array([83.4, 83.5], dtype='<f8')
+ assert_almost_equal(uradians(big_endian).astype(float),
+ uradians(little_endian).astype(float))
+
+ def test_object_casting(self):
+ # This used to trigger the object-type version of
+ # the bitwise_or operation, because float64 -> object
+ # casting succeeds
+ def rs():
+ x = np.ones([484, 286])
+ y = np.zeros([484, 286])
+ x |= y
+
+ assert_raises(TypeError, rs)
+
+ def test_unicode_scalar(self):
+ # Ticket #600
+ x = np.array(["DROND", "DROND1"], dtype="U6")
+ el = x[1]
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ new = pickle.loads(pickle.dumps(el, protocol=proto))
+ assert_equal(new, el)
+
+ def test_arange_non_native_dtype(self):
+ # Ticket #616
+ for T in ('>f4', '<f4'):
+ dt = np.dtype(T)
+ assert_equal(np.arange(0, dtype=dt).dtype, dt)
+ assert_equal(np.arange(0.5, dtype=dt).dtype, dt)
+ assert_equal(np.arange(5, dtype=dt).dtype, dt)
+
+ def test_bool_flat_indexing_invalid_nr_elements(self):
+ s = np.ones(10, dtype=float)
+ x = np.ones((3, 3), dtype=float)
+
+ def ia(x, s, v):
+ x[(s > 0)] = v
+
+ assert_raises(IndexError, ia, x, s, np.zeros(9, dtype=float))
+ assert_raises(IndexError, ia, x, s, np.zeros(11, dtype=float))
+
+ # Old special case (different code path):
+ assert_raises(ValueError, ia, x.flat, s, np.zeros(9, dtype=float))
+ assert_raises(ValueError, ia, x.flat, s, np.zeros(11, dtype=float))
+
+ def test_mem_scalar_indexing(self):
+ # Ticket #603
+ x = np.array([0], dtype=float)
+ index = np.array(0, dtype=np.int32)
+ x[index]
+
+ def test_binary_repr_0_width(self):
+ assert_equal(np.binary_repr(0, width=3), '000')
+
+ def test_fromstring(self):
+ assert_equal(np.fromstring("12:09:09", dtype=int, sep=":"),
+ [12, 9, 9])
+
+ def test_searchsorted_variable_length(self):
+ x = np.array(['a', 'aa', 'b'])
+ y = np.array(['d', 'e'])
+ assert_equal(x.searchsorted(y), [3, 3])
+
+ def test_string_argsort_with_zeros(self):
+ # Check argsort for strings containing zeros.
+ x = np.frombuffer(b"\x00\x02\x00\x01", dtype="|S2")
+ assert_array_equal(x.argsort(kind='m'), np.array([1, 0]))
+ assert_array_equal(x.argsort(kind='q'), np.array([1, 0]))
+
+ def test_string_sort_with_zeros(self):
+ # Check sort for strings containing zeros.
+ x = np.frombuffer(b"\x00\x02\x00\x01", dtype="|S2")
+ y = np.frombuffer(b"\x00\x01\x00\x02", dtype="|S2")
+ assert_array_equal(np.sort(x, kind="q"), y)
+
+ def test_copy_detection_zero_dim(self):
+ # Ticket #658
+ np.indices((0, 3, 4)).T.reshape(-1, 3)
+
+ def test_flat_byteorder(self):
+ # Ticket #657
+ x = np.arange(10)
+ assert_array_equal(x.astype('>i4'), x.astype('<i4').flat[:])
+ assert_array_equal(x.astype('>i4').flat[:], x.astype('<i4'))
+
+ def test_flat_index_byteswap(self):
+ for dt in (np.dtype('<i4'), np.dtype('>i4')):
+ x = np.array([-1, 0, 1], dtype=dt)
+ assert_equal(x.flat[0].dtype, x[0].dtype)
+
+ def test_copy_detection_corner_case(self):
+ # Ticket #658
+ np.indices((0, 3, 4)).T.reshape(-1, 3)
+
+ # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides.
+ # With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous,
+ # 0-sized reshape itself is tested elsewhere.
+ @pytest.mark.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max,
+ reason="Using relaxed stride checking")
+ def test_copy_detection_corner_case2(self):
+ # Ticket #771: strides are not set correctly when reshaping 0-sized
+ # arrays
+ b = np.indices((0, 3, 4)).T.reshape(-1, 3)
+ assert_equal(b.strides, (3 * b.itemsize, b.itemsize))
+
+ def test_object_array_refcounting(self):
+ # Ticket #633
+ if not hasattr(sys, 'getrefcount'):
+ return
+
+ # NB. this is probably CPython-specific
+
+ cnt = sys.getrefcount
+
+ a = object()
+ b = object()
+ c = object()
+
+ cnt0_a = cnt(a)
+ cnt0_b = cnt(b)
+ cnt0_c = cnt(c)
+
+ # -- 0d -> 1-d broadcast slice assignment
+
+ arr = np.zeros(5, dtype=np.object_)
+
+ arr[:] = a
+ assert_equal(cnt(a), cnt0_a + 5)
+
+ arr[:] = b
+ assert_equal(cnt(a), cnt0_a)
+ assert_equal(cnt(b), cnt0_b + 5)
+
+ arr[:2] = c
+ assert_equal(cnt(b), cnt0_b + 3)
+ assert_equal(cnt(c), cnt0_c + 2)
+
+ del arr
+
+ # -- 1-d -> 2-d broadcast slice assignment
+
+ arr = np.zeros((5, 2), dtype=np.object_)
+ arr0 = np.zeros(2, dtype=np.object_)
+
+ arr0[0] = a
+ assert_(cnt(a) == cnt0_a + 1)
+ arr0[1] = b
+ assert_(cnt(b) == cnt0_b + 1)
+
+ arr[:, :] = arr0
+ assert_(cnt(a) == cnt0_a + 6)
+ assert_(cnt(b) == cnt0_b + 6)
+
+ arr[:, 0] = None
+ assert_(cnt(a) == cnt0_a + 1)
+
+ del arr, arr0
+
+ # -- 2-d copying + flattening
+
+ arr = np.zeros((5, 2), dtype=np.object_)
+
+ arr[:, 0] = a
+ arr[:, 1] = b
+ assert_(cnt(a) == cnt0_a + 5)
+ assert_(cnt(b) == cnt0_b + 5)
+
+ arr2 = arr.copy()
+ assert_(cnt(a) == cnt0_a + 10)
+ assert_(cnt(b) == cnt0_b + 10)
+
+ arr2 = arr[:, 0].copy()
+ assert_(cnt(a) == cnt0_a + 10)
+ assert_(cnt(b) == cnt0_b + 5)
+
+ arr2 = arr.flatten()
+ assert_(cnt(a) == cnt0_a + 10)
+ assert_(cnt(b) == cnt0_b + 10)
+
+ del arr, arr2
+
+ # -- concatenate, repeat, take, choose
+
+ arr1 = np.zeros((5, 1), dtype=np.object_)
+ arr2 = np.zeros((5, 1), dtype=np.object_)
+
+ arr1[...] = a
+ arr2[...] = b
+ assert_(cnt(a) == cnt0_a + 5)
+ assert_(cnt(b) == cnt0_b + 5)
+
+ tmp = np.concatenate((arr1, arr2))
+ assert_(cnt(a) == cnt0_a + 5 + 5)
+ assert_(cnt(b) == cnt0_b + 5 + 5)
+
+ tmp = arr1.repeat(3, axis=0)
+ assert_(cnt(a) == cnt0_a + 5 + 3*5)
+
+ tmp = arr1.take([1, 2, 3], axis=0)
+ assert_(cnt(a) == cnt0_a + 5 + 3)
+
+ x = np.array([[0], [1], [0], [1], [1]], int)
+ tmp = x.choose(arr1, arr2)
+ assert_(cnt(a) == cnt0_a + 5 + 2)
+ assert_(cnt(b) == cnt0_b + 5 + 3)
+
+ del tmp # Avoid pyflakes unused variable warning
+
+ def test_mem_custom_float_to_array(self):
+ # Ticket 702
+ class MyFloat(object):
+ def __float__(self):
+ return 1.0
+
+ tmp = np.atleast_1d([MyFloat()])
+ tmp.astype(float) # Should succeed
+
+ def test_object_array_refcount_self_assign(self):
+ # Ticket #711
+ class VictimObject(object):
+ deleted = False
+
+ def __del__(self):
+ self.deleted = True
+
+ d = VictimObject()
+ arr = np.zeros(5, dtype=np.object_)
+ arr[:] = d
+ del d
+ arr[:] = arr # refcount of 'd' might hit zero here
+ assert_(not arr[0].deleted)
+ arr[:] = arr # trying to induce a segfault by doing it again...
+ assert_(not arr[0].deleted)
+
+ def test_mem_fromiter_invalid_dtype_string(self):
+ x = [1, 2, 3]
+ assert_raises(ValueError,
+ np.fromiter, [xi for xi in x], dtype='S')
+
+ def test_reduce_big_object_array(self):
+ # Ticket #713
+ oldsize = np.setbufsize(10*16)
+ a = np.array([None]*161, object)
+ assert_(not np.any(a))
+ np.setbufsize(oldsize)
+
+ def test_mem_0d_array_index(self):
+ # Ticket #714
+ np.zeros(10)[np.array(0)]
+
+ def test_nonnative_endian_fill(self):
+ # Non-native endian arrays were incorrectly filled with scalars
+ # before r5034.
+ if sys.byteorder == 'little':
+ dtype = np.dtype('>i4')
+ else:
+ dtype = np.dtype('<i4')
+ x = np.empty([1], dtype=dtype)
+ x.fill(1)
+ assert_equal(x, np.array([1], dtype=dtype))
+
+ def test_astype_copy(self):
+ # Ticket #788, changeset r5155
+ data_dir = path.join(path.dirname(__file__), 'data')
+ filename = path.join(data_dir, 'astype_copy.pkl')
+ if sys.version_info[0] >= 3:
+ f = open(filename, 'rb')
+ xp = pickle.load(f, encoding='latin1')
+ f.close()
+ else:
+ f = open(filename)
+ xp = pickle.load(f)
+ f.close()
+ xpd = xp.astype(np.float64)
+ assert_((xp.__array_interface__['data'][0] !=
+ xpd.__array_interface__['data'][0]))
+
+ def test_compress_small_type(self):
+ # Ticket #789, changeset 5217.
+ # compress with out argument segfaulted if cannot cast safely
+ import numpy as np
+ a = np.array([[1, 2], [3, 4]])
+ b = np.zeros((2, 1), dtype=np.single)
+ try:
+ a.compress([True, False], axis=1, out=b)
+ raise AssertionError("compress with an out which cannot be "
+ "safely casted should not return "
+ "successfully")
+ except TypeError:
+ pass
+
+ def test_attributes(self):
+ # Ticket #791
+ class TestArray(np.ndarray):
+ def __new__(cls, data, info):
+ result = np.array(data)
+ result = result.view(cls)
+ result.info = info
+ return result
+
+ def __array_finalize__(self, obj):
+ self.info = getattr(obj, 'info', '')
+
+ dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
+ assert_(dat.info == 'jubba')
+ dat.resize((4, 2))
+ assert_(dat.info == 'jubba')
+ dat.sort()
+ assert_(dat.info == 'jubba')
+ dat.fill(2)
+ assert_(dat.info == 'jubba')
+ dat.put([2, 3, 4], [6, 3, 4])
+ assert_(dat.info == 'jubba')
+ dat.setfield(4, np.int32, 0)
+ assert_(dat.info == 'jubba')
+ dat.setflags()
+ assert_(dat.info == 'jubba')
+ assert_(dat.all(1).info == 'jubba')
+ assert_(dat.any(1).info == 'jubba')
+ assert_(dat.argmax(1).info == 'jubba')
+ assert_(dat.argmin(1).info == 'jubba')
+ assert_(dat.argsort(1).info == 'jubba')
+ assert_(dat.astype(TestArray).info == 'jubba')
+ assert_(dat.byteswap().info == 'jubba')
+ assert_(dat.clip(2, 7).info == 'jubba')
+ assert_(dat.compress([0, 1, 1]).info == 'jubba')
+ assert_(dat.conj().info == 'jubba')
+ assert_(dat.conjugate().info == 'jubba')
+ assert_(dat.copy().info == 'jubba')
+ dat2 = TestArray([2, 3, 1, 0], 'jubba')
+ choices = [[0, 1, 2, 3], [10, 11, 12, 13],
+ [20, 21, 22, 23], [30, 31, 32, 33]]
+ assert_(dat2.choose(choices).info == 'jubba')
+ assert_(dat.cumprod(1).info == 'jubba')
+ assert_(dat.cumsum(1).info == 'jubba')
+ assert_(dat.diagonal().info == 'jubba')
+ assert_(dat.flatten().info == 'jubba')
+ assert_(dat.getfield(np.int32, 0).info == 'jubba')
+ assert_(dat.imag.info == 'jubba')
+ assert_(dat.max(1).info == 'jubba')
+ assert_(dat.mean(1).info == 'jubba')
+ assert_(dat.min(1).info == 'jubba')
+ assert_(dat.newbyteorder().info == 'jubba')
+ assert_(dat.prod(1).info == 'jubba')
+ assert_(dat.ptp(1).info == 'jubba')
+ assert_(dat.ravel().info == 'jubba')
+ assert_(dat.real.info == 'jubba')
+ assert_(dat.repeat(2).info == 'jubba')
+ assert_(dat.reshape((2, 4)).info == 'jubba')
+ assert_(dat.round().info == 'jubba')
+ assert_(dat.squeeze().info == 'jubba')
+ assert_(dat.std(1).info == 'jubba')
+ assert_(dat.sum(1).info == 'jubba')
+ assert_(dat.swapaxes(0, 1).info == 'jubba')
+ assert_(dat.take([2, 3, 5]).info == 'jubba')
+ assert_(dat.transpose().info == 'jubba')
+ assert_(dat.T.info == 'jubba')
+ assert_(dat.var(1).info == 'jubba')
+ assert_(dat.view(TestArray).info == 'jubba')
+ # These methods do not preserve subclasses
+ assert_(type(dat.nonzero()[0]) is np.ndarray)
+ assert_(type(dat.nonzero()[1]) is np.ndarray)
+
+ def test_recarray_tolist(self):
+ # Ticket #793, changeset r5215
+ # Comparisons fail for NaN, so we can't use random memory
+ # for the test.
+ buf = np.zeros(40, dtype=np.int8)
+ a = np.recarray(2, formats="i4,f8,f8", names="id,x,y", buf=buf)
+ b = a.tolist()
+ assert_( a[0].tolist() == b[0])
+ assert_( a[1].tolist() == b[1])
+
+ def test_nonscalar_item_method(self):
+ # Make sure that .item() fails gracefully when it should
+ a = np.arange(5)
+ assert_raises(ValueError, a.item)
+
+ def test_char_array_creation(self):
+ a = np.array('123', dtype='c')
+ b = np.array([b'1', b'2', b'3'])
+ assert_equal(a, b)
+
+ def test_unaligned_unicode_access(self):
+ # Ticket #825
+ for i in range(1, 9):
+ msg = 'unicode offset: %d chars' % i
+ t = np.dtype([('a', 'S%d' % i), ('b', 'U2')])
+ x = np.array([(b'a', u'b')], dtype=t)
+ if sys.version_info[0] >= 3:
+ assert_equal(str(x), "[(b'a', 'b')]", err_msg=msg)
+ else:
+ assert_equal(str(x), "[('a', u'b')]", err_msg=msg)
+
+ def test_sign_for_complex_nan(self):
+ # Ticket 794.
+ with np.errstate(invalid='ignore'):
+ C = np.array([-np.inf, -2+1j, 0, 2-1j, np.inf, np.nan])
+ have = np.sign(C)
+ want = np.array([-1+0j, -1+0j, 0+0j, 1+0j, 1+0j, np.nan])
+ assert_equal(have, want)
+
+ def test_for_equal_names(self):
+ # Ticket #674
+ dt = np.dtype([('foo', float), ('bar', float)])
+ a = np.zeros(10, dt)
+ b = list(a.dtype.names)
+ b[0] = "notfoo"
+ a.dtype.names = b
+ assert_(a.dtype.names[0] == "notfoo")
+ assert_(a.dtype.names[1] == "bar")
+
+ def test_for_object_scalar_creation(self):
+ # Ticket #816
+ a = np.object_()
+ b = np.object_(3)
+ b2 = np.object_(3.0)
+ c = np.object_([4, 5])
+ d = np.object_([None, {}, []])
+ assert_(a is None)
+ assert_(type(b) is int)
+ assert_(type(b2) is float)
+ assert_(type(c) is np.ndarray)
+ assert_(c.dtype == object)
+ assert_(d.dtype == object)
+
+ def test_array_resize_method_system_error(self):
+ # Ticket #840 - order should be an invalid keyword.
+ x = np.array([[0, 1], [2, 3]])
+ assert_raises(TypeError, x.resize, (2, 2), order='C')
+
+ def test_for_zero_length_in_choose(self):
+ "Ticket #882"
+ a = np.array(1)
+ assert_raises(ValueError, lambda x: x.choose([]), a)
+
+ def test_array_ndmin_overflow(self):
+ "Ticket #947."
+ assert_raises(ValueError, lambda: np.array([1], ndmin=33))
+
+ def test_void_scalar_with_titles(self):
+ # No ticket
+ data = [('john', 4), ('mary', 5)]
+ dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)]
+ arr = np.array(data, dtype=dtype1)
+ assert_(arr[0][0] == 'john')
+ assert_(arr[0][1] == 4)
+
+ def test_void_scalar_constructor(self):
+ #Issue #1550
+
+ #Create test string data, construct void scalar from data and assert
+ #that void scalar contains original data.
+ test_string = np.array("test")
+ test_string_void_scalar = np.core.multiarray.scalar(
+ np.dtype(("V", test_string.dtype.itemsize)), test_string.tobytes())
+
+ assert_(test_string_void_scalar.view(test_string.dtype) == test_string)
+
+ #Create record scalar, construct from data and assert that
+ #reconstructed scalar is correct.
+ test_record = np.ones((), "i,i")
+ test_record_void_scalar = np.core.multiarray.scalar(
+ test_record.dtype, test_record.tobytes())
+
+ assert_(test_record_void_scalar == test_record)
+
+ # Test pickle and unpickle of void and record scalars
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ assert_(pickle.loads(
+ pickle.dumps(test_string, protocol=proto)) == test_string)
+ assert_(pickle.loads(
+ pickle.dumps(test_record, protocol=proto)) == test_record)
+
+ def test_blasdot_uninitialized_memory(self):
+ # Ticket #950
+ for m in [0, 1, 2]:
+ for n in [0, 1, 2]:
+ for k in range(3):
+ # Try to ensure that x->data contains non-zero floats
+ x = np.array([123456789e199], dtype=np.float64)
+ if IS_PYPY:
+ x.resize((m, 0), refcheck=False)
+ else:
+ x.resize((m, 0))
+ y = np.array([123456789e199], dtype=np.float64)
+ if IS_PYPY:
+ y.resize((0, n), refcheck=False)
+ else:
+ y.resize((0, n))
+
+ # `dot` should just return zero (m, n) matrix
+ z = np.dot(x, y)
+ assert_(np.all(z == 0))
+ assert_(z.shape == (m, n))
+
+ def test_zeros(self):
+ # Regression test for #1061.
+        # Set a size which cannot fit into a 64-bit signed integer
+ sz = 2 ** 64
+ with assert_raises_regex(ValueError,
+ 'Maximum allowed dimension exceeded'):
+ np.empty(sz)
+
+ def test_huge_arange(self):
+ # Regression test for #1062.
+        # Set a size which cannot fit into a 64-bit signed integer
+        sz = 2 ** 64
+        with assert_raises_regex(ValueError,
+                                 'Maximum allowed size exceeded'):
+            a = np.arange(sz)
+            assert_(a.size == sz)
+
+ def test_fromiter_bytes(self):
+ # Ticket #1058
+ a = np.fromiter(list(range(10)), dtype='b')
+ b = np.fromiter(list(range(10)), dtype='B')
+ assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
+ assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
+
+ def test_array_from_sequence_scalar_array(self):
+ # Ticket #1078: segfaults when creating an array with a sequence of
+ # 0d arrays.
+ a = np.array((np.ones(2), np.array(2)))
+ assert_equal(a.shape, (2,))
+ assert_equal(a.dtype, np.dtype(object))
+ assert_equal(a[0], np.ones(2))
+ assert_equal(a[1], np.array(2))
+
+ a = np.array(((1,), np.array(1)))
+ assert_equal(a.shape, (2,))
+ assert_equal(a.dtype, np.dtype(object))
+ assert_equal(a[0], (1,))
+ assert_equal(a[1], np.array(1))
+
+ def test_array_from_sequence_scalar_array2(self):
+ # Ticket #1081: weird array with strange input...
+ t = np.array([np.array([]), np.array(0, object)])
+ assert_equal(t.shape, (2,))
+ assert_equal(t.dtype, np.dtype(object))
+
+ def test_array_too_big(self):
+ # Ticket #1080.
+ assert_raises(ValueError, np.zeros, [975]*7, np.int8)
+ assert_raises(ValueError, np.zeros, [26244]*5, np.int8)
+
+ def test_dtype_keyerrors_(self):
+ # Ticket #1106.
+ dt = np.dtype([('f1', np.uint)])
+ assert_raises(KeyError, dt.__getitem__, "f2")
+ assert_raises(IndexError, dt.__getitem__, 1)
+ assert_raises(TypeError, dt.__getitem__, 0.0)
+
+ def test_lexsort_buffer_length(self):
+ # Ticket #1217, don't segfault.
+ a = np.ones(100, dtype=np.int8)
+ b = np.ones(100, dtype=np.int32)
+ i = np.lexsort((a[::-1], b))
+ assert_equal(i, np.arange(100, dtype=int))
+
+ def test_object_array_to_fixed_string(self):
+ # Ticket #1235.
+ a = np.array(['abcdefgh', 'ijklmnop'], dtype=np.object_)
+ b = np.array(a, dtype=(np.str_, 8))
+ assert_equal(a, b)
+ c = np.array(a, dtype=(np.str_, 5))
+ assert_equal(c, np.array(['abcde', 'ijklm']))
+ d = np.array(a, dtype=(np.str_, 12))
+ assert_equal(a, d)
+ e = np.empty((2, ), dtype=(np.str_, 8))
+ e[:] = a[:]
+ assert_equal(a, e)
+
+ def test_unicode_to_string_cast(self):
+ # Ticket #1240.
+ a = np.array([[u'abc', u'\u03a3'],
+ [u'asdf', u'erw']],
+ dtype='U')
+ assert_raises(UnicodeEncodeError, np.array, a, 'S4')
+
+ def test_mixed_string_unicode_array_creation(self):
+ a = np.array(['1234', u'123'])
+ assert_(a.itemsize == 16)
+ a = np.array([u'123', '1234'])
+ assert_(a.itemsize == 16)
+ a = np.array(['1234', u'123', '12345'])
+ assert_(a.itemsize == 20)
+ a = np.array([u'123', '1234', u'12345'])
+ assert_(a.itemsize == 20)
+ a = np.array([u'123', '1234', u'1234'])
+ assert_(a.itemsize == 16)
+
+ def test_misaligned_objects_segfault(self):
+ # Ticket #1198 and #1267
+ a1 = np.zeros((10,), dtype='O,c')
+ a2 = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], 'S10')
+ a1['f0'] = a2
+ repr(a1)
+ np.argmax(a1['f0'])
+ a1['f0'][1] = "FOO"
+ a1['f0'] = "FOO"
+ np.array(a1['f0'], dtype='S')
+ np.nonzero(a1['f0'])
+ a1.sort()
+ copy.deepcopy(a1)
+
+ def test_misaligned_scalars_segfault(self):
+ # Ticket #1267
+ s1 = np.array(('a', 'Foo'), dtype='c,O')
+ s2 = np.array(('b', 'Bar'), dtype='c,O')
+ s1['f1'] = s2['f1']
+ s1['f1'] = 'Baz'
+
+ def test_misaligned_dot_product_objects(self):
+ # Ticket #1267
+        # This didn't require a fix, but it's worth testing anyway, because
+        # it may fail if .dot stops enforcing that the arrays be BEHAVED
+ a = np.array([[(1, 'a'), (0, 'a')], [(0, 'a'), (1, 'a')]], dtype='O,c')
+ b = np.array([[(4, 'a'), (1, 'a')], [(2, 'a'), (2, 'a')]], dtype='O,c')
+ np.dot(a['f0'], b['f0'])
+
+ def test_byteswap_complex_scalar(self):
+ # Ticket #1259 and gh-441
+ for dtype in [np.dtype('<'+t) for t in np.typecodes['Complex']]:
+ z = np.array([2.2-1.1j], dtype)
+ x = z[0] # always native-endian
+ y = x.byteswap()
+ if x.dtype.byteorder == z.dtype.byteorder:
+ # little-endian machine
+ assert_equal(x, np.frombuffer(y.tobytes(), dtype=dtype.newbyteorder()))
+ else:
+ # big-endian machine
+ assert_equal(x, np.frombuffer(y.tobytes(), dtype=dtype))
+ # double check real and imaginary parts:
+ assert_equal(x.real, y.real.byteswap())
+ assert_equal(x.imag, y.imag.byteswap())
+
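+    # Illustrative sketch (not an upstream test): byteswap() rewrites the
+    # raw bytes while newbyteorder() merely relabels the dtype's byte
+    # order, so chaining the two preserves every value.
+    def _demo_byteswap_newbyteorder(self):
+        x = np.arange(3, dtype='<i4')
+        y = x.byteswap().newbyteorder()  # '>i4' view of the swapped bytes
+        assert_array_equal(x, y)
+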
+ def test_structured_arrays_with_objects1(self):
+ # Ticket #1299
+ stra = 'aaaa'
+ strb = 'bbbb'
+ x = np.array([[(0, stra), (1, strb)]], 'i8,O')
+ x[x.nonzero()] = x.ravel()[:1]
+ assert_(x[0, 1] == x[0, 0])
+
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+ def test_structured_arrays_with_objects2(self):
+ # Ticket #1299 second test
+ stra = 'aaaa'
+ strb = 'bbbb'
+ numb = sys.getrefcount(strb)
+ numa = sys.getrefcount(stra)
+ x = np.array([[(0, stra), (1, strb)]], 'i8,O')
+ x[x.nonzero()] = x.ravel()[:1]
+ assert_(sys.getrefcount(strb) == numb)
+ assert_(sys.getrefcount(stra) == numa + 2)
+
+ def test_duplicate_title_and_name(self):
+ # Ticket #1254
+ dtspec = [(('a', 'a'), 'i'), ('b', 'i')]
+ assert_raises(ValueError, np.dtype, dtspec)
+
+ def test_signed_integer_division_overflow(self):
+ # Ticket #1317.
+ def test_type(t):
+ min = np.array([np.iinfo(t).min])
+ min //= -1
+
+ with np.errstate(divide="ignore"):
+ for t in (np.int8, np.int16, np.int32, np.int64, int, np.long):
+ test_type(t)
+
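+    # Illustrative sketch (not an upstream test): the overflow above exists
+    # because two's complement is asymmetric -- abs(min) equals max + 1, so
+    # min // -1 has no representable result in the same dtype.
+    def _demo_twos_complement_asymmetry(self):
+        info = np.iinfo(np.int8)
+        assert_(info.min == -128 and info.max == 127)
+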
+ def test_buffer_hashlib(self):
+ try:
+ from hashlib import md5
+ except ImportError:
+ from md5 import new as md5
+
+        x = np.array([1, 2, 3], dtype=np.dtype('<i4'))
+        assert_equal(md5(x).hexdigest(), '2a1dd1e1e59d0a384c26951e316cd7e6')
+
+ def test_log1p_compiler_shenanigans(self):
+ # Check if log1p is behaving on 32 bit intel systems.
+ assert_(np.isfinite(np.log1p(np.exp2(-53))))
+
+ def test_fromiter_comparison(self):
+ a = np.fromiter(list(range(10)), dtype='b')
+ b = np.fromiter(list(range(10)), dtype='B')
+ assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
+ assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
+
+ def test_fromstring_crash(self):
+ # Ticket #1345: the following should not cause a crash
+ np.fromstring(b'aa, aa, 1.0', sep=',')
+
+ def test_ticket_1539(self):
+ dtypes = [x for x in np.typeDict.values()
+ if (issubclass(x, np.number)
+ and not issubclass(x, np.timedelta64))]
+ a = np.array([], np.bool_) # not x[0] because it is unordered
+ failures = []
+
+ for x in dtypes:
+ b = a.astype(x)
+ for y in dtypes:
+ c = a.astype(y)
+ try:
+ np.dot(b, c)
+ except TypeError:
+ failures.append((x, y))
+ if failures:
+ raise AssertionError("Failures: %r" % failures)
+
+ def test_ticket_1538(self):
+ x = np.finfo(np.float32)
+ for name in 'eps epsneg max min resolution tiny'.split():
+ assert_equal(type(getattr(x, name)), np.float32,
+ err_msg=name)
+
+ def test_ticket_1434(self):
+ # Check that the out= argument in var and std has an effect
+ data = np.array(((1, 2, 3), (4, 5, 6), (7, 8, 9)))
+ out = np.zeros((3,))
+
+ ret = data.var(axis=1, out=out)
+ assert_(ret is out)
+ assert_array_equal(ret, data.var(axis=1))
+
+ ret = data.std(axis=1, out=out)
+ assert_(ret is out)
+ assert_array_equal(ret, data.std(axis=1))
+
+ def test_complex_nan_maximum(self):
+ cnan = complex(0, np.nan)
+ assert_equal(np.maximum(1, cnan), cnan)
+
+ def test_subclass_int_tuple_assignment(self):
+ # ticket #1563
+ class Subclass(np.ndarray):
+ def __new__(cls, i):
+ return np.ones((i,)).view(cls)
+
+ x = Subclass(5)
+ x[(0,)] = 2 # shouldn't raise an exception
+ assert_equal(x[0], 2)
+
+ def test_ufunc_no_unnecessary_views(self):
+ # ticket #1548
+ class Subclass(np.ndarray):
+ pass
+ x = np.array([1, 2, 3]).view(Subclass)
+ y = np.add(x, x, x)
+ assert_equal(id(x), id(y))
+
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+ def test_take_refcount(self):
+ # ticket #939
+ a = np.arange(16, dtype=float)
+ a.shape = (4, 4)
+ lut = np.ones((5 + 3, 4), float)
+ rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype)
+ c1 = sys.getrefcount(rgba)
+ try:
+ lut.take(a, axis=0, mode='clip', out=rgba)
+ except TypeError:
+ pass
+ c2 = sys.getrefcount(rgba)
+ assert_equal(c1, c2)
+
+ def test_fromfile_tofile_seeks(self):
+ # On Python 3, tofile/fromfile used to get (#1610) the Python
+ # file handle out of sync
+ f0 = tempfile.NamedTemporaryFile()
+ f = f0.file
+ f.write(np.arange(255, dtype='u1').tobytes())
+
+ f.seek(20)
+ ret = np.fromfile(f, count=4, dtype='u1')
+ assert_equal(ret, np.array([20, 21, 22, 23], dtype='u1'))
+ assert_equal(f.tell(), 24)
+
+ f.seek(40)
+ np.array([1, 2, 3], dtype='u1').tofile(f)
+ assert_equal(f.tell(), 43)
+
+ f.seek(40)
+ data = f.read(3)
+ assert_equal(data, b"\x01\x02\x03")
+
+ f.seek(80)
+ f.read(4)
+ data = np.fromfile(f, dtype='u1', count=4)
+ assert_equal(data, np.array([84, 85, 86, 87], dtype='u1'))
+
+ f.close()
+
+ def test_complex_scalar_warning(self):
+ for tp in [np.csingle, np.cdouble, np.clongdouble]:
+ x = tp(1+2j)
+ assert_warns(np.ComplexWarning, float, x)
+ with suppress_warnings() as sup:
+ sup.filter(np.ComplexWarning)
+ assert_equal(float(x), float(x.real))
+
+ def test_complex_scalar_complex_cast(self):
+ for tp in [np.csingle, np.cdouble, np.clongdouble]:
+ x = tp(1+2j)
+ assert_equal(complex(x), 1+2j)
+
+ def test_complex_boolean_cast(self):
+ # Ticket #2218
+ for tp in [np.csingle, np.cdouble, np.clongdouble]:
+ x = np.array([0, 0+0.5j, 0.5+0j], dtype=tp)
+ assert_equal(x.astype(bool), np.array([0, 1, 1], dtype=bool))
+ assert_(np.any(x))
+ assert_(np.all(x[1:]))
+
+ def test_uint_int_conversion(self):
+ x = 2**64 - 1
+ assert_equal(int(np.uint64(x)), x)
+
+ def test_duplicate_field_names_assign(self):
+ ra = np.fromiter(((i*3, i*2) for i in range(10)), dtype='i8,f8')
+ ra.dtype.names = ('f1', 'f2')
+ repr(ra) # should not cause a segmentation fault
+ assert_raises(ValueError, setattr, ra.dtype, 'names', ('f1', 'f1'))
+
+ def test_eq_string_and_object_array(self):
+ # From e-mail thread "__eq__ with str and object" (Keith Goodman)
+ a1 = np.array(['a', 'b'], dtype=object)
+ a2 = np.array(['a', 'c'])
+ assert_array_equal(a1 == a2, [True, False])
+ assert_array_equal(a2 == a1, [True, False])
+
+ def test_nonzero_byteswap(self):
+ a = np.array([0x80000000, 0x00000080, 0], dtype=np.uint32)
+ a.dtype = np.float32
+ assert_equal(a.nonzero()[0], [1])
+ a = a.byteswap().newbyteorder()
+ assert_equal(a.nonzero()[0], [1]) # [0] if nonzero() ignores swap
+
+ def test_find_common_type_boolean(self):
+ # Ticket #1695
+ assert_(np.find_common_type([], ['?', '?']) == '?')
+
+ def test_empty_mul(self):
+ a = np.array([1.])
+ a[1:1] *= 2
+ assert_equal(a, [1.])
+
+ def test_array_side_effect(self):
+ # The second use of itemsize was throwing an exception because in
+ # ctors.c, discover_itemsize was calling PyObject_Length without
+ # checking the return code. This failed to get the length of the
+ # number 2, and the exception hung around until something checked
+ # PyErr_Occurred() and returned an error.
+ assert_equal(np.dtype('S10').itemsize, 10)
+ np.array([['abc', 2], ['long ', '0123456789']], dtype=np.string_)
+ assert_equal(np.dtype('S10').itemsize, 10)
+
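+    # Illustrative sketch (not an upstream test): the itemsize discovery
+    # described above sizes a flexible 'S' dtype from the longest element,
+    # stringifying non-string entries such as the number 2 first.
+    def _demo_itemsize_discovery(self):
+        a = np.array(['abc', 2], dtype=np.string_)
+        assert_(a.dtype == np.dtype('S3'))  # len('abc') wins over len('2')
+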
+ def test_any_float(self):
+ # all and any for floats
+ a = np.array([0.1, 0.9])
+ assert_(np.any(a))
+ assert_(np.all(a))
+
+ def test_large_float_sum(self):
+ a = np.arange(10000, dtype='f')
+ assert_equal(a.sum(dtype='d'), a.astype('d').sum())
+
+ def test_ufunc_casting_out(self):
+ a = np.array(1.0, dtype=np.float32)
+ b = np.array(1.0, dtype=np.float64)
+ c = np.array(1.0, dtype=np.float32)
+ np.add(a, b, out=c)
+ assert_equal(c, 2.0)
+
+ def test_array_scalar_contiguous(self):
+ # Array scalars are both C and Fortran contiguous
+ assert_(np.array(1.0).flags.c_contiguous)
+ assert_(np.array(1.0).flags.f_contiguous)
+ assert_(np.array(np.float32(1.0)).flags.c_contiguous)
+ assert_(np.array(np.float32(1.0)).flags.f_contiguous)
+
+ def test_squeeze_contiguous(self):
+ # Similar to GitHub issue #387
+ a = np.zeros((1, 2)).squeeze()
+ b = np.zeros((2, 2, 2), order='F')[:, :, ::2].squeeze()
+ assert_(a.flags.c_contiguous)
+ assert_(a.flags.f_contiguous)
+ assert_(b.flags.f_contiguous)
+
+ def test_squeeze_axis_handling(self):
+ # Issue #10779
+ # Ensure proper handling of objects
+ # that don't support axis specification
+ # when squeezing
+
+ class OldSqueeze(np.ndarray):
+
+ def __new__(cls,
+ input_array):
+ obj = np.asarray(input_array).view(cls)
+ return obj
+
+ # it is perfectly reasonable that prior
+ # to numpy version 1.7.0 a subclass of ndarray
+ # might have been created that did not expect
+ # squeeze to have an axis argument
+ # NOTE: this example is somewhat artificial;
+ # it is designed to simulate an old API
+ # expectation to guard against regression
+ def squeeze(self):
+ return super(OldSqueeze, self).squeeze()
+
+ oldsqueeze = OldSqueeze(np.array([[1],[2],[3]]))
+
+ # if no axis argument is specified the old API
+ # expectation should give the correct result
+ assert_equal(np.squeeze(oldsqueeze),
+ np.array([1,2,3]))
+
+ # likewise, axis=None should work perfectly well
+ # with the old API expectation
+ assert_equal(np.squeeze(oldsqueeze, axis=None),
+ np.array([1,2,3]))
+
+ # however, specification of any particular axis
+ # should raise a TypeError in the context of the
+ # old API specification, even when using a valid
+ # axis specification like 1 for this array
+ with assert_raises(TypeError):
+ # this would silently succeed for array
+ # subclasses / objects that did not support
+ # squeeze axis argument handling before fixing
+ # Issue #10779
+ np.squeeze(oldsqueeze, axis=1)
+
+ # check for the same behavior when using an invalid
+ # axis specification -- in this case axis=0 does not
+ # have size 1, but the priority should be to raise
+ # a TypeError for the axis argument and NOT a
+ # ValueError for squeezing a non-empty dimension
+ with assert_raises(TypeError):
+ np.squeeze(oldsqueeze, axis=0)
+
+ # the new API knows how to handle the axis
+ # argument and will return a ValueError if
+ # attempting to squeeze an axis that is not
+ # of length 1
+ with assert_raises(ValueError):
+ np.squeeze(np.array([[1],[2],[3]]), axis=0)
+
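+    # Illustrative sketch (not an upstream test): subclasses that must keep
+    # working under both calling conventions can simply accept the axis
+    # keyword and delegate, instead of pinning the pre-1.7.0 signature.
+    def _demo_squeeze_compat(self):
+        class NewSqueeze(np.ndarray):
+            def squeeze(self, axis=None):
+                return super(NewSqueeze, self).squeeze(axis=axis)
+
+        x = np.array([[1], [2], [3]]).view(NewSqueeze)
+        assert_equal(np.squeeze(x, axis=1), np.array([1, 2, 3]))
+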
+ def test_reduce_contiguous(self):
+ # GitHub issue #387
+ a = np.add.reduce(np.zeros((2, 1, 2)), (0, 1))
+ b = np.add.reduce(np.zeros((2, 1, 2)), 1)
+ assert_(a.flags.c_contiguous)
+ assert_(a.flags.f_contiguous)
+ assert_(b.flags.c_contiguous)
+
+ def test_object_array_self_reference(self):
+ # Object arrays with references to themselves can cause problems
+ a = np.array(0, dtype=object)
+ a[()] = a
+ assert_raises(RecursionError, int, a)
+ assert_raises(RecursionError, long, a)
+ assert_raises(RecursionError, float, a)
+ if sys.version_info.major == 2:
+            # in python 3, this falls back on operator.index, which fails
+            # on dtype=object
+ assert_raises(RecursionError, oct, a)
+ assert_raises(RecursionError, hex, a)
+ a[()] = None
+
+ def test_object_array_circular_reference(self):
+ # Test the same for a circular reference.
+ a = np.array(0, dtype=object)
+ b = np.array(0, dtype=object)
+ a[()] = b
+ b[()] = a
+ assert_raises(RecursionError, int, a)
+ # NumPy has no tp_traverse currently, so circular references
+ # cannot be detected. So resolve it:
+ a[()] = None
+
+ # This was causing a to become like the above
+ a = np.array(0, dtype=object)
+ a[...] += 1
+ assert_equal(a, 1)
+
+ def test_object_array_nested(self):
+ # but is fine with a reference to a different array
+ a = np.array(0, dtype=object)
+ b = np.array(0, dtype=object)
+ a[()] = b
+ assert_equal(int(a), int(0))
+ assert_equal(long(a), long(0))
+ assert_equal(float(a), float(0))
+ if sys.version_info.major == 2:
+            # in python 3, this falls back on operator.index, which fails
+            # on dtype=object
+ assert_equal(oct(a), oct(0))
+ assert_equal(hex(a), hex(0))
+
+ def test_object_array_self_copy(self):
+ # An object array being copied into itself DECREF'ed before INCREF'ing
+ # causing segmentation faults (gh-3787)
+ a = np.array(object(), dtype=object)
+ np.copyto(a, a)
+ if HAS_REFCOUNT:
+ assert_(sys.getrefcount(a[()]) == 2)
+ a[()].__class__ # will segfault if object was deleted
+
+ def test_zerosize_accumulate(self):
+ "Ticket #1733"
+ x = np.array([[42, 0]], dtype=np.uint32)
+ assert_equal(np.add.accumulate(x[:-1, 0]), [])
+
+ def test_objectarray_setfield(self):
+ # Setfield should not overwrite Object fields with non-Object data
+ x = np.array([1, 2, 3], dtype=object)
+ assert_raises(TypeError, x.setfield, 4, np.int32, 0)
+
+ def test_setting_rank0_string(self):
+ "Ticket #1736"
+ s1 = b"hello1"
+ s2 = b"hello2"
+ a = np.zeros((), dtype="S10")
+ a[()] = s1
+ assert_equal(a, np.array(s1))
+ a[()] = np.array(s2)
+ assert_equal(a, np.array(s2))
+
+ a = np.zeros((), dtype='f4')
+ a[()] = 3
+ assert_equal(a, np.array(3))
+ a[()] = np.array(4)
+ assert_equal(a, np.array(4))
+
+ def test_string_astype(self):
+ "Ticket #1748"
+ s1 = b'black'
+ s2 = b'white'
+ s3 = b'other'
+ a = np.array([[s1], [s2], [s3]])
+ assert_equal(a.dtype, np.dtype('S5'))
+ b = a.astype(np.dtype('S0'))
+ assert_equal(b.dtype, np.dtype('S5'))
+
+ def test_ticket_1756(self):
+ # Ticket #1756
+ s = b'0123456789abcdef'
+ a = np.array([s]*5)
+ for i in range(1, 17):
+ a1 = np.array(a, "|S%d" % i)
+ a2 = np.array([s[:i]]*5)
+ assert_equal(a1, a2)
+
+ def test_fields_strides(self):
+ "gh-2355"
+ r = np.frombuffer(b'abcdefghijklmnop'*4*3, dtype='i4,(2,3)u2')
+ assert_equal(r[0:3:2]['f1'], r['f1'][0:3:2])
+ assert_equal(r[0:3:2]['f1'][0], r[0:3:2][0]['f1'])
+ assert_equal(r[0:3:2]['f1'][0][()], r[0:3:2][0]['f1'][()])
+ assert_equal(r[0:3:2]['f1'][0].strides, r[0:3:2][0]['f1'].strides)
+
+ def test_alignment_update(self):
+ # Check that alignment flag is updated on stride setting
+ a = np.arange(10)
+ assert_(a.flags.aligned)
+ a.strides = 3
+ assert_(not a.flags.aligned)
+
+ def test_ticket_1770(self):
+ "Should not segfault on python 3k"
+ import numpy as np
+ try:
+ a = np.zeros((1,), dtype=[('f1', 'f')])
+ a['f1'] = 1
+ a['f2'] = 1
+ except ValueError:
+ pass
+ except Exception:
+ raise AssertionError
+
+ def test_ticket_1608(self):
+ "x.flat shouldn't modify data"
+ x = np.array([[1, 2], [3, 4]]).T
+ np.array(x.flat)
+ assert_equal(x, [[1, 3], [2, 4]])
+
+ def test_pickle_string_overwrite(self):
+ import re
+
+ data = np.array([1], dtype='b')
+ blob = pickle.dumps(data, protocol=1)
+ data = pickle.loads(blob)
+
+ # Check that loads does not clobber interned strings
+ s = re.sub("a(.)", "\x01\\1", "a_")
+ assert_equal(s[0], "\x01")
+ data[0] = 0xbb
+ s = re.sub("a(.)", "\x01\\1", "a_")
+ assert_equal(s[0], "\x01")
+
+ def test_pickle_bytes_overwrite(self):
+ if sys.version_info[0] >= 3:
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ data = np.array([1], dtype='b')
+ data = pickle.loads(pickle.dumps(data, protocol=proto))
+ data[0] = 0xdd
+ bytestring = "\x01 ".encode('ascii')
+ assert_equal(bytestring[0:1], '\x01'.encode('ascii'))
+
+ def test_pickle_py2_array_latin1_hack(self):
+ # Check that unpickling hacks in Py3 that support
+ # encoding='latin1' work correctly.
+
+ # Python2 output for pickle.dumps(numpy.array([129], dtype='b'))
+ data = (b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\n"
+ b"tp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'i1'\np8\n"
+ b"I0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nNNNI-1\nI-1\nI0\ntp12\nbI00\nS'\\x81'\n"
+ b"p13\ntp14\nb.")
+ if sys.version_info[0] >= 3:
+ # This should work:
+ result = pickle.loads(data, encoding='latin1')
+ assert_array_equal(result, np.array([129], dtype='b'))
+ # Should not segfault:
+ assert_raises(Exception, pickle.loads, data, encoding='koi8-r')
+
+ def test_pickle_py2_scalar_latin1_hack(self):
+ # Check that scalar unpickling hack in Py3 that supports
+ # encoding='latin1' work correctly.
+
+ # Python2 output for pickle.dumps(...)
+ datas = [
+ # (original, python2_pickle, koi8r_validity)
+ (np.unicode_('\u6bd2'),
+ (b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n"
+ b"(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\nI0\n"
+ b"tp6\nbS'\\xd2k\\x00\\x00'\np7\ntp8\nRp9\n."),
+ 'invalid'),
+
+ (np.float64(9e123),
+ (b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'f8'\n"
+ b"p2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI-1\nI-1\nI0\ntp6\n"
+ b"bS'O\\x81\\xb7Z\\xaa:\\xabY'\np7\ntp8\nRp9\n."),
+ 'invalid'),
+
+ (np.bytes_(b'\x9c'), # different 8-bit code point in KOI8-R vs latin1
+ (b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'S1'\np2\n"
+ b"I0\nI1\ntp3\nRp4\n(I3\nS'|'\np5\nNNNI1\nI1\nI0\ntp6\nbS'\\x9c'\np7\n"
+ b"tp8\nRp9\n."),
+ 'different'),
+ ]
+ if sys.version_info[0] >= 3:
+ for original, data, koi8r_validity in datas:
+ result = pickle.loads(data, encoding='latin1')
+ assert_equal(result, original)
+
+ # Decoding under non-latin1 encoding (e.g.) KOI8-R can
+ # produce bad results, but should not segfault.
+ if koi8r_validity == 'different':
+ # Unicode code points happen to lie within latin1,
+ # but are different in koi8-r, resulting to silent
+ # bogus results
+ result = pickle.loads(data, encoding='koi8-r')
+ assert_(result != original)
+ elif koi8r_validity == 'invalid':
+ # Unicode code points outside latin1, so results
+ # to an encoding exception
+ assert_raises(ValueError, pickle.loads, data, encoding='koi8-r')
+ else:
+ raise ValueError(koi8r_validity)
+
+ def test_structured_type_to_object(self):
+ a_rec = np.array([(0, 1), (3, 2)], dtype='i4,i8')
+ a_obj = np.empty((2,), dtype=object)
+ a_obj[0] = (0, 1)
+ a_obj[1] = (3, 2)
+ # astype records -> object
+ assert_equal(a_rec.astype(object), a_obj)
+ # '=' records -> object
+ b = np.empty_like(a_obj)
+ b[...] = a_rec
+ assert_equal(b, a_obj)
+ # '=' object -> records
+ b = np.empty_like(a_rec)
+ b[...] = a_obj
+ assert_equal(b, a_rec)
+
+ def test_assign_obj_listoflists(self):
+ # Ticket # 1870
+ # The inner list should get assigned to the object elements
+ a = np.zeros(4, dtype=object)
+ b = a.copy()
+ a[0] = [1]
+ a[1] = [2]
+ a[2] = [3]
+ a[3] = [4]
+ b[...] = [[1], [2], [3], [4]]
+ assert_equal(a, b)
+ # The first dimension should get broadcast
+ a = np.zeros((2, 2), dtype=object)
+ a[...] = [[1, 2]]
+ assert_equal(a, [[1, 2], [1, 2]])
+
+ def test_memoryleak(self):
+ # Ticket #1917 - ensure that array data doesn't leak
+ for i in range(1000):
+ # 100MB times 1000 would give 100GB of memory usage if it leaks
+ a = np.empty((100000000,), dtype='i1')
+ del a
+
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+ def test_ufunc_reduce_memoryleak(self):
+ a = np.arange(6)
+ acnt = sys.getrefcount(a)
+ np.add.reduce(a)
+ assert_equal(sys.getrefcount(a), acnt)
+
+ def test_search_sorted_invalid_arguments(self):
+ # Ticket #2021, should not segfault.
+ x = np.arange(0, 4, dtype='datetime64[D]')
+ assert_raises(TypeError, x.searchsorted, 1)
+
+ def test_string_truncation(self):
+ # Ticket #1990 - Data can be truncated in creation of an array from a
+ # mixed sequence of numeric values and strings
+ for val in [True, 1234, 123.4, complex(1, 234)]:
+ for tostr in [asunicode, asbytes]:
+ b = np.array([val, tostr('xx')])
+ assert_equal(tostr(b[0]), tostr(val))
+ b = np.array([tostr('xx'), val])
+ assert_equal(tostr(b[1]), tostr(val))
+
+ # test also with longer strings
+ b = np.array([val, tostr('xxxxxxxxxx')])
+ assert_equal(tostr(b[0]), tostr(val))
+ b = np.array([tostr('xxxxxxxxxx'), val])
+ assert_equal(tostr(b[1]), tostr(val))
+
+ def test_string_truncation_ucs2(self):
+ # Ticket #2081. Python compiled with two byte unicode
+ # can lead to truncation if itemsize is not properly
+ # adjusted for NumPy's four byte unicode.
+ if sys.version_info[0] >= 3:
+ a = np.array(['abcd'])
+ else:
+ a = np.array([u'abcd'])
+ assert_equal(a.dtype.itemsize, 16)
+
+ def test_unique_stable(self):
+ # Ticket #2063 must always choose stable sort for argsort to
+ # get consistent results
+ v = np.array(([0]*5 + [1]*6 + [2]*6)*4)
+ res = np.unique(v, return_index=True)
+ tgt = (np.array([0, 1, 2]), np.array([ 0, 5, 11]))
+ assert_equal(res, tgt)
+
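+    # Illustrative sketch (not an upstream test): with a stable argsort,
+    # return_index always reports the first occurrence of each value.
+    def _demo_unique_first_occurrence(self):
+        v = np.array([2, 0, 1, 0, 2])
+        _, idx = np.unique(v, return_index=True)
+        assert_array_equal(idx, [1, 2, 0])
+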
+ def test_unicode_alloc_dealloc_match(self):
+ # Ticket #1578, the mismatch only showed up when running
+ # python-debug for python versions >= 2.7, and then as
+ # a core dump and error message.
+ a = np.array(['abc'], dtype=np.unicode)[0]
+ del a
+
+ def test_refcount_error_in_clip(self):
+ # Ticket #1588
+ a = np.zeros((2,), dtype='>i2').clip(min=0)
+ x = a + a
+ # This used to segfault:
+ y = str(x)
+ # Check the final string:
+ assert_(y == "[0 0]")
+
+ def test_searchsorted_wrong_dtype(self):
+ # Ticket #2189, it used to segfault, so we check that it raises the
+ # proper exception.
+ a = np.array([('a', 1)], dtype='S1, int')
+ assert_raises(TypeError, np.searchsorted, a, 1.2)
+ # Ticket #2066, similar problem:
+ dtype = np.format_parser(['i4', 'i4'], [], [])
+ a = np.recarray((2, ), dtype)
+ assert_raises(TypeError, np.searchsorted, a, 1)
+
+ def test_complex64_alignment(self):
+ # Issue gh-2668 (trac 2076), segfault on sparc due to misalignment
+ dtt = np.complex64
+ arr = np.arange(10, dtype=dtt)
+ # 2D array
+ arr2 = np.reshape(arr, (2, 5))
+ # Fortran write followed by (C or F) read caused bus error
+ data_str = arr2.tobytes('F')
+ data_back = np.ndarray(arr2.shape,
+ arr2.dtype,
+ buffer=data_str,
+ order='F')
+ assert_array_equal(arr2, data_back)
+
+ def test_structured_count_nonzero(self):
+ arr = np.array([0, 1]).astype('i4, (2)i4')[:1]
+ count = np.count_nonzero(arr)
+ assert_equal(count, 0)
+
+ def test_copymodule_preserves_f_contiguity(self):
+ a = np.empty((2, 2), order='F')
+ b = copy.copy(a)
+ c = copy.deepcopy(a)
+ assert_(b.flags.fortran)
+ assert_(b.flags.f_contiguous)
+ assert_(c.flags.fortran)
+ assert_(c.flags.f_contiguous)
+
+ def test_fortran_order_buffer(self):
+ import numpy as np
+ a = np.array([['Hello', 'Foob']], dtype='U5', order='F')
+ arr = np.ndarray(shape=[1, 2, 5], dtype='U1', buffer=a)
+ arr2 = np.array([[[u'H', u'e', u'l', u'l', u'o'],
+ [u'F', u'o', u'o', u'b', u'']]])
+ assert_array_equal(arr, arr2)
+
+ def test_assign_from_sequence_error(self):
+ # Ticket #4024.
+ arr = np.array([1, 2, 3])
+ assert_raises(ValueError, arr.__setitem__, slice(None), [9, 9])
+ arr.__setitem__(slice(None), [9])
+ assert_equal(arr, [9, 9, 9])
+
+ def test_format_on_flex_array_element(self):
+ # Ticket #4369.
+        dt = np.dtype([('date', '<M8[D]'), ('val', '<f8')])
+        arr = np.array([('2000-01-01', 1)], dt)
+        formatted = '{0}'.format(arr[0])
+        assert_equal(formatted, str(arr[0]))
+
+    def test_richcompare_crash(self):
+        # gh-4613
+        import operator as op
+
+        # dummy class where __array__ throws exception
+        class Foo(object):
+            __array_priority__ = 1002
+
+            def __array__(self, *args, **kwargs):
+                raise Exception()
+
+        rhs = Foo()
+        lhs = np.array(1)
+        for f in [op.lt, op.le, op.gt, op.ge]:
+            if sys.version_info[0] >= 3:
+ assert_raises(TypeError, f, lhs, rhs)
+ elif not sys.py3kwarning:
+ # With -3 switch in python 2, DeprecationWarning is raised
+ # which we are not interested in
+ f(lhs, rhs)
+ assert_(not op.eq(lhs, rhs))
+ assert_(op.ne(lhs, rhs))
+
+ def test_richcompare_scalar_and_subclass(self):
+ # gh-4709
+ class Foo(np.ndarray):
+ def __eq__(self, other):
+ return "OK"
+
+ x = np.array([1, 2, 3]).view(Foo)
+ assert_equal(10 == x, "OK")
+ assert_equal(np.int32(10) == x, "OK")
+ assert_equal(np.array([10]) == x, "OK")
+
+ def test_pickle_empty_string(self):
+ # gh-3926
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ test_string = np.string_('')
+ assert_equal(pickle.loads(
+ pickle.dumps(test_string, protocol=proto)), test_string)
+
+ def test_frompyfunc_many_args(self):
+ # gh-5672
+
+ def passer(*args):
+ pass
+
+ assert_raises(ValueError, np.frompyfunc, passer, 32, 1)
+
+ def test_repeat_broadcasting(self):
+ # gh-5743
+ a = np.arange(60).reshape(3, 4, 5)
+ for axis in chain(range(-a.ndim, a.ndim), [None]):
+ assert_equal(a.repeat(2, axis=axis), a.repeat([2], axis=axis))
+
+ def test_frompyfunc_nout_0(self):
+ # gh-2014
+
+ def f(x):
+ x[0], x[-1] = x[-1], x[0]
+
+ uf = np.frompyfunc(f, 1, 0)
+ a = np.array([[1, 2, 3], [4, 5], [6, 7, 8, 9]])
+ assert_equal(uf(a), ())
+ assert_array_equal(a, [[3, 2, 1], [5, 4], [9, 7, 8, 6]])
+
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+ def test_leak_in_structured_dtype_comparison(self):
+ # gh-6250
+ recordtype = np.dtype([('a', np.float64),
+ ('b', np.int32),
+ ('d', (str, 5))])
+
+ # Simple case
+ a = np.zeros(2, dtype=recordtype)
+ for i in range(100):
+ a == a
+ assert_(sys.getrefcount(a) < 10)
+
+ # The case in the bug report.
+ before = sys.getrefcount(a)
+ u, v = a[0], a[1]
+ u == v
+ del u, v
+ gc.collect()
+ after = sys.getrefcount(a)
+ assert_equal(before, after)
+
+ def test_empty_percentile(self):
+ # gh-6530 / gh-6553
+ assert_array_equal(np.percentile(np.arange(10), []), np.array([]))
+
+ def test_void_compare_segfault(self):
+ # gh-6922. The following should not segfault
+        a = np.ones(3, dtype=[('object', 'O'), ('int', '<i2')])
+        a.sort()
+
+    def test_pickle_module(self):
+        # gh-12837
+        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+            s = pickle.dumps(np.arange(12), protocol=proto)
+            if proto >= 5:
+ # unpickling ndarray goes through _frombuffer for protocol 5
+ assert b'numpy.core.numeric' in s
+ else:
+ assert b'numpy.core.multiarray' in s
+
+ def test_object_casting_errors(self):
+ # gh-11993
+ arr = np.array(['AAAAA', 18465886.0, 18465886.0], dtype=object)
+ assert_raises(TypeError, arr.astype, 'c8')
+
+    def test_ediff1d_casting(self):
+ # gh-12711
+ x = np.array([1, 2, 4, 7, 0], dtype=np.int16)
+ res = np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99]))
+ assert_equal(res, [-99, 1, 2, 3, -7, 88, 99])
+ assert_raises(ValueError, np.ediff1d, x, to_begin=(1<<20))
+ assert_raises(ValueError, np.ediff1d, x, to_end=(1<<20))
+
+ def test_pickle_datetime64_array(self):
+ # gh-12745 (would fail with pickle5 installed)
+ d = np.datetime64('2015-07-04 12:59:59.50', 'ns')
+ arr = np.array([d])
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ dumped = pickle.dumps(arr, protocol=proto)
+ assert_equal(pickle.loads(dumped), arr)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_regression.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_regression.pyc
new file mode 100644
index 0000000..38ee5dd
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_regression.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalar_ctors.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalar_ctors.py
new file mode 100644
index 0000000..b21bc9d
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalar_ctors.py
@@ -0,0 +1,65 @@
+"""
+Test the scalar constructors, which also do type-coercion
+"""
+from __future__ import division, absolute_import, print_function
+
+import sys
+import platform
+import pytest
+
+import numpy as np
+from numpy.testing import (
+ assert_equal, assert_almost_equal, assert_raises, assert_warns,
+ )
+
+class TestFromString(object):
+ def test_floating(self):
+ # Ticket #640, floats from string
+ fsingle = np.single('1.234')
+ fdouble = np.double('1.234')
+ flongdouble = np.longdouble('1.234')
+ assert_almost_equal(fsingle, 1.234)
+ assert_almost_equal(fdouble, 1.234)
+ assert_almost_equal(flongdouble, 1.234)
+
+ def test_floating_overflow(self):
+ """ Strings containing an unrepresentable float overflow """
+ fhalf = np.half('1e10000')
+ assert_equal(fhalf, np.inf)
+ fsingle = np.single('1e10000')
+ assert_equal(fsingle, np.inf)
+ fdouble = np.double('1e10000')
+ assert_equal(fdouble, np.inf)
+ flongdouble = assert_warns(RuntimeWarning, np.longdouble, '1e10000')
+ assert_equal(flongdouble, np.inf)
+
+ fhalf = np.half('-1e10000')
+ assert_equal(fhalf, -np.inf)
+ fsingle = np.single('-1e10000')
+ assert_equal(fsingle, -np.inf)
+ fdouble = np.double('-1e10000')
+ assert_equal(fdouble, -np.inf)
+ flongdouble = assert_warns(RuntimeWarning, np.longdouble, '-1e10000')
+ assert_equal(flongdouble, -np.inf)
+
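+    # Illustrative sketch (not an upstream test): the saturation above
+    # mirrors Python's own parser, which maps out-of-range literals to inf
+    # rather than raising.
+    def _demo_parse_overflow(self):
+        assert_equal(float('1e400'), np.inf)
+        assert_equal(np.double('1e400'), np.inf)
+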
+ @pytest.mark.skipif((sys.version_info[0] >= 3)
+ or (sys.platform == "win32"
+ and platform.architecture()[0] == "64bit"),
+ reason="numpy.intp('0xff', 16) not supported on Py3 "
+ "or 64 bit Windows")
+ def test_intp(self):
+ # Ticket #99
+ i_width = np.int_(0).nbytes*2 - 1
+ np.intp('0x' + 'f'*i_width, 16)
+ assert_raises(OverflowError, np.intp, '0x' + 'f'*(i_width+1), 16)
+ assert_raises(ValueError, np.intp, '0x1', 32)
+ assert_equal(255, np.intp('0xFF', 16))
+
+
+class TestFromInt(object):
+ def test_intp(self):
+ # Ticket #99
+ assert_equal(1024, np.intp(1024))
+
+ def test_uint64_from_negative(self):
+ assert_equal(np.uint64(-2), np.uint64(18446744073709551614))
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalar_ctors.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalar_ctors.pyc
new file mode 100644
index 0000000..f48f1c8
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalar_ctors.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarbuffer.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarbuffer.py
new file mode 100644
index 0000000..cd520d9
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarbuffer.py
@@ -0,0 +1,105 @@
+"""
+Test scalar buffer interface adheres to PEP 3118
+"""
+import sys
+import numpy as np
+import pytest
+
+from numpy.testing import assert_, assert_equal, assert_raises
+
+# PEP3118 format strings for native (standard alignment and byteorder) types
+scalars_and_codes = [
+ (np.bool_, '?'),
+ (np.byte, 'b'),
+ (np.short, 'h'),
+ (np.intc, 'i'),
+ (np.int_, 'l'),
+ (np.longlong, 'q'),
+ (np.ubyte, 'B'),
+ (np.ushort, 'H'),
+ (np.uintc, 'I'),
+ (np.uint, 'L'),
+ (np.ulonglong, 'Q'),
+ (np.half, 'e'),
+ (np.single, 'f'),
+ (np.double, 'd'),
+ (np.longdouble, 'g'),
+ (np.csingle, 'Zf'),
+ (np.cdouble, 'Zd'),
+ (np.clongdouble, 'Zg'),
+]
+scalars_only, codes_only = zip(*scalars_and_codes)
+
+
+@pytest.mark.skipif(sys.version_info.major < 3,
+ reason="Python 2 scalars lack a buffer interface")
+class TestScalarPEP3118(object):
+
+ @pytest.mark.parametrize('scalar', scalars_only, ids=codes_only)
+ def test_scalar_match_array(self, scalar):
+ x = scalar()
+ a = np.array([], dtype=np.dtype(scalar))
+ mv_x = memoryview(x)
+ mv_a = memoryview(a)
+ assert_equal(mv_x.format, mv_a.format)
+
+ @pytest.mark.parametrize('scalar', scalars_only, ids=codes_only)
+ def test_scalar_dim(self, scalar):
+ x = scalar()
+ mv_x = memoryview(x)
+ assert_equal(mv_x.itemsize, np.dtype(scalar).itemsize)
+ assert_equal(mv_x.ndim, 0)
+ assert_equal(mv_x.shape, ())
+ assert_equal(mv_x.strides, ())
+ assert_equal(mv_x.suboffsets, ())
+
+ @pytest.mark.parametrize('scalar, code', scalars_and_codes, ids=codes_only)
+ def test_scalar_known_code(self, scalar, code):
+ x = scalar()
+ mv_x = memoryview(x)
+ assert_equal(mv_x.format, code)
+
+ def test_void_scalar_structured_data(self):
+ dt = np.dtype([('name', np.unicode_, 16), ('grades', np.float64, (2,))])
+ x = np.array(('ndarray_scalar', (1.2, 3.0)), dtype=dt)[()]
+ assert_(isinstance(x, np.void))
+ mv_x = memoryview(x)
+ expected_size = 16 * np.dtype((np.unicode_, 1)).itemsize
+ expected_size += 2 * np.dtype((np.float64, 1)).itemsize
+ assert_equal(mv_x.itemsize, expected_size)
+ assert_equal(mv_x.ndim, 0)
+ assert_equal(mv_x.shape, ())
+ assert_equal(mv_x.strides, ())
+ assert_equal(mv_x.suboffsets, ())
+
+ # check scalar format string against ndarray format string
+ a = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt)
+ assert_(isinstance(a, np.ndarray))
+ mv_a = memoryview(a)
+ assert_equal(mv_x.itemsize, mv_a.itemsize)
+ assert_equal(mv_x.format, mv_a.format)
+
+ def test_datetime_memoryview(self):
+ # gh-11656
+ # Values verified with v1.13.3, shape is not () as in test_scalar_dim
+ def as_dict(m):
+ return dict(strides=m.strides, shape=m.shape, itemsize=m.itemsize,
+ ndim=m.ndim, format=m.format)
+
+ dt1 = np.datetime64('2016-01-01')
+ dt2 = np.datetime64('2017-01-01')
+ expected = {'strides': (1,), 'itemsize': 1, 'ndim': 1,
+ 'shape': (8,), 'format': 'B'}
+ v = memoryview(dt1)
+ res = as_dict(v)
+ assert_equal(res, expected)
+
+ v = memoryview(dt2 - dt1)
+ res = as_dict(v)
+ assert_equal(res, expected)
+
+ dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')])
+ a = np.empty(1, dt)
+ # Fails to create a PEP 3118 valid buffer
+ assert_raises((ValueError, BufferError), memoryview, a[0])
+
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarbuffer.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarbuffer.pyc
new file mode 100644
index 0000000..78a56ce
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarbuffer.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarinherit.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarinherit.py
new file mode 100644
index 0000000..9e32cf6
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarinherit.py
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+""" Test printing of scalar types.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import numpy as np
+from numpy.testing import assert_
+
+
+class A(object):
+ pass
+class B(A, np.float64):
+ pass
+
+class C(B):
+ pass
+class D(C, B):
+ pass
+
+class B0(np.float64, A):
+ pass
+class C0(B0):
+ pass
+
+class TestInherit(object):
+ def test_init(self):
+ x = B(1.0)
+ assert_(str(x) == '1.0')
+ y = C(2.0)
+ assert_(str(y) == '2.0')
+ z = D(3.0)
+ assert_(str(z) == '3.0')
+
+ def test_init2(self):
+ x = B0(1.0)
+ assert_(str(x) == '1.0')
+ y = C0(2.0)
+ assert_(str(y) == '2.0')
+
+
+class TestCharacter(object):
+ def test_char_radd(self):
+        # GH issue 9620, reached gentype_add and raised TypeError
+ np_s = np.string_('abc')
+ np_u = np.unicode_('abc')
+ s = b'def'
+ u = u'def'
+ assert_(np_s.__radd__(np_s) is NotImplemented)
+ assert_(np_s.__radd__(np_u) is NotImplemented)
+ assert_(np_s.__radd__(s) is NotImplemented)
+ assert_(np_s.__radd__(u) is NotImplemented)
+ assert_(np_u.__radd__(np_s) is NotImplemented)
+ assert_(np_u.__radd__(np_u) is NotImplemented)
+ assert_(np_u.__radd__(s) is NotImplemented)
+ assert_(np_u.__radd__(u) is NotImplemented)
+ assert_(s + np_s == b'defabc')
+ assert_(u + np_u == u'defabc')
+
+
+ class Mystr(str, np.generic):
+ # would segfault
+ pass
+
+ ret = s + Mystr('abc')
+ assert_(type(ret) is type(s))
+
+ def test_char_repeat(self):
+ np_s = np.string_('abc')
+ np_u = np.unicode_('abc')
+ np_i = np.int(5)
+ res_s = b'abc' * 5
+ res_u = u'abc' * 5
+ assert_(np_s * np_i == res_s)
+ assert_(np_u * np_i == res_u)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarinherit.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarinherit.pyc
new file mode 100644
index 0000000..eaade3e
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarinherit.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarmath.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarmath.py
new file mode 100644
index 0000000..51bcf2b
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarmath.py
@@ -0,0 +1,666 @@
+from __future__ import division, absolute_import, print_function
+
+import sys
+import warnings
+import itertools
+import operator
+import platform
+import pytest
+
+import numpy as np
+from numpy.testing import (
+ assert_, assert_equal, assert_raises, assert_almost_equal,
+ assert_array_equal, IS_PYPY, suppress_warnings, _gen_alignment_data,
+ assert_warns
+ )
+
+types = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc,
+ np.int_, np.uint, np.longlong, np.ulonglong,
+ np.single, np.double, np.longdouble, np.csingle,
+ np.cdouble, np.clongdouble]
+
+floating_types = np.floating.__subclasses__()
+complex_floating_types = np.complexfloating.__subclasses__()
+
+
+# This compares scalarmath against ufuncs.
+
+class TestTypes(object):
+ def test_types(self):
+ for atype in types:
+ a = atype(1)
+ assert_(a == 1, "error with %r: got %r" % (atype, a))
+
+ def test_type_add(self):
+ # list of types
+ for k, atype in enumerate(types):
+ a_scalar = atype(3)
+ a_array = np.array([3], dtype=atype)
+ for l, btype in enumerate(types):
+ b_scalar = btype(1)
+ b_array = np.array([1], dtype=btype)
+ c_scalar = a_scalar + b_scalar
+ c_array = a_array + b_array
+ # It was comparing the type numbers, but the new ufunc
+ # function-finding mechanism finds the lowest function
+ # to which both inputs can be cast - which produces 'l'
+ # when you do 'q' + 'b'. The old function finding mechanism
+ # skipped ahead based on the first argument, but that
+ # does not produce properly symmetric results...
+ assert_equal(c_scalar.dtype, c_array.dtype,
+ "error with types (%d/'%c' + %d/'%c')" %
+ (k, np.dtype(atype).char, l, np.dtype(btype).char))
+
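+    # Illustrative sketch (not an upstream test): the symmetry demanded by
+    # the comment above can be checked directly through np.promote_types.
+    def _demo_symmetric_promotion(self):
+        assert_equal(np.promote_types('q', 'b'),
+                     np.promote_types('b', 'q'))
+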
+ def test_type_create(self):
+ for k, atype in enumerate(types):
+ a = np.array([1, 2, 3], atype)
+ b = atype([1, 2, 3])
+ assert_equal(a, b)
+
+ def test_leak(self):
+        # test leak of scalar objects
+        # a leak would show up in valgrind as ~2.6MB of still-reachable memory
+ for i in range(200000):
+ np.add(1, 1)
+
+
+class TestBaseMath(object):
+ def test_blocked(self):
+        # test alignment offsets for simd instructions
+        # alignments for vz + 2 * (vs - 1) + 1
+ for dt, sz in [(np.float32, 11), (np.float64, 7), (np.int32, 11)]:
+ for out, inp1, inp2, msg in _gen_alignment_data(dtype=dt,
+ type='binary',
+ max_size=sz):
+ exp1 = np.ones_like(inp1)
+ inp1[...] = np.ones_like(inp1)
+ inp2[...] = np.zeros_like(inp2)
+ assert_almost_equal(np.add(inp1, inp2), exp1, err_msg=msg)
+ assert_almost_equal(np.add(inp1, 2), exp1 + 2, err_msg=msg)
+ assert_almost_equal(np.add(1, inp2), exp1, err_msg=msg)
+
+ np.add(inp1, inp2, out=out)
+ assert_almost_equal(out, exp1, err_msg=msg)
+
+ inp2[...] += np.arange(inp2.size, dtype=dt) + 1
+ assert_almost_equal(np.square(inp2),
+ np.multiply(inp2, inp2), err_msg=msg)
+ # skip true divide for ints
+ if dt != np.int32 or (sys.version_info.major < 3 and not sys.py3kwarning):
+ assert_almost_equal(np.reciprocal(inp2),
+ np.divide(1, inp2), err_msg=msg)
+
+ inp1[...] = np.ones_like(inp1)
+ np.add(inp1, 2, out=out)
+ assert_almost_equal(out, exp1 + 2, err_msg=msg)
+ inp2[...] = np.ones_like(inp2)
+ np.add(2, inp2, out=out)
+ assert_almost_equal(out, exp1 + 2, err_msg=msg)
+
+ def test_lower_align(self):
+        # check data that is not aligned to element size
+        # e.g. doubles are aligned to 4 bytes on i386
+ d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
+ o = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
+ assert_almost_equal(d + d, d * 2)
+ np.add(d, d, out=o)
+ np.add(np.ones_like(d), d, out=o)
+ np.add(d, np.ones_like(d), out=o)
+ np.add(np.ones_like(d), d)
+ np.add(d, np.ones_like(d))
+
+
+class TestPower(object):
+ def test_small_types(self):
+ for t in [np.int8, np.int16, np.float16]:
+ a = t(3)
+ b = a ** 4
+ assert_(b == 81, "error with %r: got %r" % (t, b))
+
+ def test_large_types(self):
+ for t in [np.int32, np.int64, np.float32, np.float64, np.longdouble]:
+ a = t(51)
+ b = a ** 4
+ msg = "error with %r: got %r" % (t, b)
+ if np.issubdtype(t, np.integer):
+ assert_(b == 6765201, msg)
+ else:
+ assert_almost_equal(b, 6765201, err_msg=msg)
+
+ def test_integers_to_negative_integer_power(self):
+ # Note that the combination of uint64 with a signed integer
+ # has common type np.float64. The other combinations should all
+ # raise a ValueError for integer ** negative integer.
+ exp = [np.array(-1, dt)[()] for dt in 'bhilq']
+
+ # 1 ** -1 possible special case
+ base = [np.array(1, dt)[()] for dt in 'bhilqBHILQ']
+ for i1, i2 in itertools.product(base, exp):
+ if i1.dtype != np.uint64:
+ assert_raises(ValueError, operator.pow, i1, i2)
+ else:
+ res = operator.pow(i1, i2)
+ assert_(res.dtype.type is np.float64)
+ assert_almost_equal(res, 1.)
+
+ # -1 ** -1 possible special case
+ base = [np.array(-1, dt)[()] for dt in 'bhilq']
+ for i1, i2 in itertools.product(base, exp):
+ if i1.dtype != np.uint64:
+ assert_raises(ValueError, operator.pow, i1, i2)
+ else:
+ res = operator.pow(i1, i2)
+ assert_(res.dtype.type is np.float64)
+ assert_almost_equal(res, -1.)
+
+ # 2 ** -1 perhaps generic
+ base = [np.array(2, dt)[()] for dt in 'bhilqBHILQ']
+ for i1, i2 in itertools.product(base, exp):
+ if i1.dtype != np.uint64:
+ assert_raises(ValueError, operator.pow, i1, i2)
+ else:
+ res = operator.pow(i1, i2)
+ assert_(res.dtype.type is np.float64)
+ assert_almost_equal(res, .5)
+
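+    # Illustrative sketch (not an upstream test): uint64 paired with any
+    # signed integer promotes to float64, which is why the uint64 branches
+    # above get float results instead of a ValueError.
+    def _demo_uint64_promotion(self):
+        assert_equal(np.promote_types(np.uint64, np.int64),
+                     np.dtype(np.float64))
+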
+ def test_mixed_types(self):
+ typelist = [np.int8, np.int16, np.float16,
+ np.float32, np.float64, np.int8,
+ np.int16, np.int32, np.int64]
+ for t1 in typelist:
+ for t2 in typelist:
+ a = t1(3)
+ b = t2(2)
+ result = a**b
+ msg = ("error with %r and %r:"
+ "got %r, expected %r") % (t1, t2, result, 9)
+ if np.issubdtype(np.dtype(result), np.integer):
+ assert_(result == 9, msg)
+ else:
+ assert_almost_equal(result, 9, err_msg=msg)
+
+ def test_modular_power(self):
+ # modular power is not implemented, so ensure it errors
+ a = 5
+ b = 4
+ c = 10
+ expected = pow(a, b, c) # noqa: F841
+ for t in (np.int32, np.float32, np.complex64):
+ # note that 3-operand power only dispatches on the first argument
+ assert_raises(TypeError, operator.pow, t(a), b, c)
+ assert_raises(TypeError, operator.pow, np.array(t(a)), b, c)
+
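+    # Illustrative sketch (not an upstream test): plain Python integers do
+    # support the three-operand form that the test above shows numpy
+    # scalars reject.
+    def _demo_python_modpow(self):
+        assert_equal(pow(5, 4, 10), 5)  # 625 % 10
+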
+
+def floordiv_and_mod(x, y):
+ return (x // y, x % y)
+
+
+def _signs(dt):
+ if dt in np.typecodes['UnsignedInteger']:
+ return (+1,)
+ else:
+ return (+1, -1)
+
+
+class TestModulus(object):
+
+ def test_modulus_basic(self):
+ dt = np.typecodes['AllInteger'] + np.typecodes['Float']
+ for op in [floordiv_and_mod, divmod]:
+ for dt1, dt2 in itertools.product(dt, dt):
+ for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)):
+ fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
+ msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
+ a = np.array(sg1*71, dtype=dt1)[()]
+ b = np.array(sg2*19, dtype=dt2)[()]
+ div, rem = op(a, b)
+ assert_equal(div*b + rem, a, err_msg=msg)
+ if sg2 == -1:
+ assert_(b < rem <= 0, msg)
+ else:
+ assert_(b > rem >= 0, msg)
+
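+    # Illustrative sketch (not an upstream test): the sign checks above
+    # encode Python's convention that the remainder takes the sign of the
+    # divisor.
+    def _demo_remainder_sign(self):
+        assert_equal(np.remainder(7, -3), -2)
+        assert_equal(np.remainder(-7, 3), 2)
+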
+ def test_float_modulus_exact(self):
+ # test that float results are exact for small integers. This also
+ # holds for the same integers scaled by powers of two.
+ nlst = list(range(-127, 0))
+ plst = list(range(1, 128))
+ dividend = nlst + [0] + plst
+ divisor = nlst + plst
+ arg = list(itertools.product(dividend, divisor))
+ tgt = list(divmod(*t) for t in arg)
+
+ a, b = np.array(arg, dtype=int).T
+        # convert exact integer results from Python to float so that
+        # signed zero can be represented and checked
+ tgtdiv, tgtrem = np.array(tgt, dtype=float).T
+ tgtdiv = np.where((tgtdiv == 0.0) & ((b < 0) ^ (a < 0)), -0.0, tgtdiv)
+ tgtrem = np.where((tgtrem == 0.0) & (b < 0), -0.0, tgtrem)
+
+ for op in [floordiv_and_mod, divmod]:
+ for dt in np.typecodes['Float']:
+ msg = 'op: %s, dtype: %s' % (op.__name__, dt)
+ fa = a.astype(dt)
+ fb = b.astype(dt)
+ # use list comprehension so a_ and b_ are scalars
+ div, rem = zip(*[op(a_, b_) for a_, b_ in zip(fa, fb)])
+ assert_equal(div, tgtdiv, err_msg=msg)
+ assert_equal(rem, tgtrem, err_msg=msg)
+
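+    # Illustrative sketch (not an upstream test): scaling both operands by
+    # the same power of two only shifts exponents, so the remainder stays
+    # exact, just as the test above relies on.
+    def _demo_exact_scaled_modulus(self):
+        assert_equal(7.0 % 3.0, 1.0)
+        assert_equal((7.0 * 2.0**-20) % (3.0 * 2.0**-20), 2.0**-20)
+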
+ def test_float_modulus_roundoff(self):
+ # gh-6127
+ dt = np.typecodes['Float']
+ for op in [floordiv_and_mod, divmod]:
+ for dt1, dt2 in itertools.product(dt, dt):
+ for sg1, sg2 in itertools.product((+1, -1), (+1, -1)):
+ fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
+ msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
+ a = np.array(sg1*78*6e-8, dtype=dt1)[()]
+ b = np.array(sg2*6e-8, dtype=dt2)[()]
+ div, rem = op(a, b)
+ # Equal assertion should hold when fmod is used
+ assert_equal(div*b + rem, a, err_msg=msg)
+ if sg2 == -1:
+ assert_(b < rem <= 0, msg)
+ else:
+ assert_(b > rem >= 0, msg)
+
+ def test_float_modulus_corner_cases(self):
+ # Check remainder magnitude.
+ for dt in np.typecodes['Float']:
+ b = np.array(1.0, dtype=dt)
+ a = np.nextafter(np.array(0.0, dtype=dt), -b)
+ rem = operator.mod(a, b)
+ assert_(rem <= b, 'dt: %s' % dt)
+ rem = operator.mod(-a, -b)
+ assert_(rem >= -b, 'dt: %s' % dt)
+
+ # Check nans, inf
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning, "invalid value encountered in remainder")
+ for dt in np.typecodes['Float']:
+ fone = np.array(1.0, dtype=dt)
+ fzer = np.array(0.0, dtype=dt)
+ finf = np.array(np.inf, dtype=dt)
+ fnan = np.array(np.nan, dtype=dt)
+ rem = operator.mod(fone, fzer)
+ assert_(np.isnan(rem), 'dt: %s' % dt)
+ # MSVC 2008 returns NaN here, so disable the check.
+ #rem = operator.mod(fone, finf)
+ #assert_(rem == fone, 'dt: %s' % dt)
+ rem = operator.mod(fone, fnan)
+ assert_(np.isnan(rem), 'dt: %s' % dt)
+ rem = operator.mod(finf, fone)
+ assert_(np.isnan(rem), 'dt: %s' % dt)
+
+
+class TestComplexDivision(object):
+ def test_zero_division(self):
+ with np.errstate(all="ignore"):
+ for t in [np.complex64, np.complex128]:
+ a = t(0.0)
+ b = t(1.0)
+ assert_(np.isinf(b/a))
+ b = t(complex(np.inf, np.inf))
+ assert_(np.isinf(b/a))
+ b = t(complex(np.inf, np.nan))
+ assert_(np.isinf(b/a))
+ b = t(complex(np.nan, np.inf))
+ assert_(np.isinf(b/a))
+ b = t(complex(np.nan, np.nan))
+ assert_(np.isnan(b/a))
+ b = t(0.)
+ assert_(np.isnan(b/a))
+
+ def test_signed_zeros(self):
+ with np.errstate(all="ignore"):
+ for t in [np.complex64, np.complex128]:
+ # tupled (numerator, denominator, expected)
+ # for testing as expected == numerator/denominator
+ data = (
+ (( 0.0,-1.0), ( 0.0, 1.0), (-1.0,-0.0)),
+ (( 0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)),
+ (( 0.0,-1.0), (-0.0,-1.0), ( 1.0, 0.0)),
+ (( 0.0,-1.0), (-0.0, 1.0), (-1.0, 0.0)),
+ (( 0.0, 1.0), ( 0.0,-1.0), (-1.0, 0.0)),
+ (( 0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)),
+ ((-0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)),
+ ((-0.0, 1.0), ( 0.0,-1.0), (-1.0,-0.0))
+ )
+ for cases in data:
+ n = cases[0]
+ d = cases[1]
+ ex = cases[2]
+ result = t(complex(n[0], n[1])) / t(complex(d[0], d[1]))
+ # check real and imag parts separately to avoid comparison
+ # in array context, which does not account for signed zeros
+ assert_equal(result.real, ex[0])
+ assert_equal(result.imag, ex[1])
+
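+    # Illustrative sketch (not an upstream test): ordinary comparison treats
+    # -0.0 and 0.0 as equal, which is why the loop above inspects the real
+    # and imaginary parts with assert_equal instead.
+    def _demo_signed_zero(self):
+        assert_(-0.0 == 0.0)                   # comparison hides the sign
+        assert_(np.signbit(np.float64(-0.0)))  # signbit exposes it
+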
+ def test_branches(self):
+ with np.errstate(all="ignore"):
+ for t in [np.complex64, np.complex128]:
+ # tupled (numerator, denominator, expected)
+ # for testing as expected == numerator/denominator
+ data = list()
+
+ # trigger branch: real(fabs(denom)) > imag(fabs(denom))
+ # followed by else condition as neither are == 0
+ data.append((( 2.0, 1.0), ( 2.0, 1.0), (1.0, 0.0)))
+
+ # trigger branch: real(fabs(denom)) > imag(fabs(denom))
+ # followed by if condition as both are == 0
+ # is performed in test_zero_division(), so this is skipped
+
+ # trigger else if branch: real(fabs(denom)) < imag(fabs(denom))
+ data.append((( 1.0, 2.0), ( 1.0, 2.0), (1.0, 0.0)))
+
+ for cases in data:
+ n = cases[0]
+ d = cases[1]
+ ex = cases[2]
+ result = t(complex(n[0], n[1])) / t(complex(d[0], d[1]))
+ # check real and imag parts separately to avoid comparison
+ # in array context, which does not account for signed zeros
+ assert_equal(result.real, ex[0])
+ assert_equal(result.imag, ex[1])
+
+
+class TestConversion(object):
+ def test_int_from_long(self):
+ l = [1e6, 1e12, 1e18, -1e6, -1e12, -1e18]
+ li = [10**6, 10**12, 10**18, -10**6, -10**12, -10**18]
+ for T in [None, np.float64, np.int64]:
+ a = np.array(l, dtype=T)
+ assert_equal([int(_m) for _m in a], li)
+
+ a = np.array(l[:3], dtype=np.uint64)
+ assert_equal([int(_m) for _m in a], li[:3])
+
+ def test_iinfo_long_values(self):
+ for code in 'bBhH':
+ res = np.array(np.iinfo(code).max + 1, dtype=code)
+ tgt = np.iinfo(code).min
+ assert_(res == tgt)
+
+ for code in np.typecodes['AllInteger']:
+ res = np.array(np.iinfo(code).max, dtype=code)
+ tgt = np.iinfo(code).max
+ assert_(res == tgt)
+
+ for code in np.typecodes['AllInteger']:
+ res = np.typeDict[code](np.iinfo(code).max)
+ tgt = np.iinfo(code).max
+ assert_(res == tgt)
+
+ def test_int_raise_behaviour(self):
+ def overflow_error_func(dtype):
+ np.typeDict[dtype](np.iinfo(dtype).max + 1)
+
+ for code in 'lLqQ':
+ assert_raises(OverflowError, overflow_error_func, code)
+
+ def test_int_from_infinite_longdouble(self):
+ # gh-627
+ x = np.longdouble(np.inf)
+ assert_raises(OverflowError, int, x)
+ with suppress_warnings() as sup:
+ sup.record(np.ComplexWarning)
+ x = np.clongdouble(np.inf)
+ assert_raises(OverflowError, int, x)
+ assert_equal(len(sup.log), 1)
+
+ @pytest.mark.skipif(not IS_PYPY, reason="Test is PyPy only (gh-9972)")
+ def test_int_from_infinite_longdouble___int__(self):
+ x = np.longdouble(np.inf)
+ assert_raises(OverflowError, x.__int__)
+ with suppress_warnings() as sup:
+ sup.record(np.ComplexWarning)
+ x = np.clongdouble(np.inf)
+ assert_raises(OverflowError, x.__int__)
+ assert_equal(len(sup.log), 1)
+
+ @pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble),
+ reason="long double is same as double")
+ @pytest.mark.skipif(platform.machine().startswith("ppc64"),
+ reason="IBM double double")
+ def test_int_from_huge_longdouble(self):
+ # Produce a longdouble that would overflow a double,
+ # use exponent that avoids bug in Darwin pow function.
+ exp = np.finfo(np.double).maxexp - 1
+ huge_ld = 2 * 1234 * np.longdouble(2) ** exp
+ huge_i = 2 * 1234 * 2 ** exp
+ assert_(huge_ld != np.inf)
+ assert_equal(int(huge_ld), huge_i)
+
+ def test_int_from_longdouble(self):
+ x = np.longdouble(1.5)
+ assert_equal(int(x), 1)
+ x = np.longdouble(-10.5)
+ assert_equal(int(x), -10)
+
+ def test_numpy_scalar_relational_operators(self):
+ # All integer
+ for dt1 in np.typecodes['AllInteger']:
+ assert_(1 > np.array(0, dtype=dt1)[()], "type %s failed" % (dt1,))
+ assert_(not 1 < np.array(0, dtype=dt1)[()], "type %s failed" % (dt1,))
+
+ for dt2 in np.typecodes['AllInteger']:
+ assert_(np.array(1, dtype=dt1)[()] > np.array(0, dtype=dt2)[()],
+ "type %s and %s failed" % (dt1, dt2))
+ assert_(not np.array(1, dtype=dt1)[()] < np.array(0, dtype=dt2)[()],
+ "type %s and %s failed" % (dt1, dt2))
+
+ #Unsigned integers
+ for dt1 in 'BHILQP':
+ assert_(-1 < np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,))
+ assert_(not -1 > np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,))
+ assert_(-1 != np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,))
+
+ #unsigned vs signed
+ for dt2 in 'bhilqp':
+ assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()],
+ "type %s and %s failed" % (dt1, dt2))
+ assert_(not np.array(1, dtype=dt1)[()] < np.array(-1, dtype=dt2)[()],
+ "type %s and %s failed" % (dt1, dt2))
+ assert_(np.array(1, dtype=dt1)[()] != np.array(-1, dtype=dt2)[()],
+ "type %s and %s failed" % (dt1, dt2))
+
+ #Signed integers and floats
+ for dt1 in 'bhlqp' + np.typecodes['Float']:
+ assert_(1 > np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,))
+ assert_(not 1 < np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,))
+ assert_(-1 == np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,))
+
+ for dt2 in 'bhlqp' + np.typecodes['Float']:
+ assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()],
+ "type %s and %s failed" % (dt1, dt2))
+ assert_(not np.array(1, dtype=dt1)[()] < np.array(-1, dtype=dt2)[()],
+ "type %s and %s failed" % (dt1, dt2))
+ assert_(np.array(-1, dtype=dt1)[()] == np.array(-1, dtype=dt2)[()],
+ "type %s and %s failed" % (dt1, dt2))
+
+ def test_scalar_comparison_to_none(self):
+        # Scalars should just return False and not give warnings.
+ # The comparisons are flagged by pep8, ignore that.
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', FutureWarning)
+ assert_(not np.float32(1) == None)
+ assert_(not np.str_('test') == None)
+ # This is dubious (see below):
+ assert_(not np.datetime64('NaT') == None)
+
+ assert_(np.float32(1) != None)
+ assert_(np.str_('test') != None)
+ # This is dubious (see below):
+ assert_(np.datetime64('NaT') != None)
+ assert_(len(w) == 0)
+
+ # For documentation purposes, this is why the datetime is dubious.
+ # At the time of deprecation this was no behaviour change, but
+ # it has to be considered when the deprecations are done.
+ assert_(np.equal(np.datetime64('NaT'), None))
+
+
+#class TestRepr(object):
+# def test_repr(self):
+# for t in types:
+# val = t(1197346475.0137341)
+# val_repr = repr(val)
+# val2 = eval(val_repr)
+# assert_equal( val, val2 )
+
+
+class TestRepr(object):
+ def _test_type_repr(self, t):
+ finfo = np.finfo(t)
+ last_fraction_bit_idx = finfo.nexp + finfo.nmant
+ last_exponent_bit_idx = finfo.nexp
+ storage_bytes = np.dtype(t).itemsize*8
+ # could add some more types to the list below
+ for which in ['small denorm', 'small norm']:
+ # Values from https://en.wikipedia.org/wiki/IEEE_754
+ constr = np.array([0x00]*storage_bytes, dtype=np.uint8)
+ if which == 'small denorm':
+ byte = last_fraction_bit_idx // 8
+ bytebit = 7-(last_fraction_bit_idx % 8)
+ constr[byte] = 1 << bytebit
+ elif which == 'small norm':
+ byte = last_exponent_bit_idx // 8
+ bytebit = 7-(last_exponent_bit_idx % 8)
+ constr[byte] = 1 << bytebit
+ else:
+ raise ValueError('hmm')
+ val = constr.view(t)[0]
+ val_repr = repr(val)
+ val2 = t(eval(val_repr))
+ if not (val2 == 0 and val < 1e-100):
+ assert_equal(val, val2)
+
+ def test_float_repr(self):
+ # long double test cannot work, because eval goes through a python
+ # float
+ for t in [np.float32, np.float64]:
+ self._test_type_repr(t)
+
+
+if not IS_PYPY:
+ # sys.getsizeof() is not valid on PyPy
+ class TestSizeOf(object):
+
+ def test_equal_nbytes(self):
+ for type in types:
+ x = type(0)
+ assert_(sys.getsizeof(x) > x.nbytes)
+
+ def test_error(self):
+ d = np.float32()
+ assert_raises(TypeError, d.__sizeof__, "a")
+
+
+class TestMultiply(object):
+ def test_seq_repeat(self):
+        # Test that basic sequences get repeated when multiplied by numpy
+        # integers, and that errors are raised when multiplying by other
+        # numpy types. Some of this behaviour may be controversial and
+        # could be open for change.
+ accepted_types = set(np.typecodes["AllInteger"])
+ deprecated_types = {'?'}
+ forbidden_types = (
+ set(np.typecodes["All"]) - accepted_types - deprecated_types)
+ forbidden_types -= {'V'} # can't default-construct void scalars
+
+ for seq_type in (list, tuple):
+ seq = seq_type([1, 2, 3])
+ for numpy_type in accepted_types:
+ i = np.dtype(numpy_type).type(2)
+ assert_equal(seq * i, seq * int(i))
+ assert_equal(i * seq, int(i) * seq)
+
+ for numpy_type in deprecated_types:
+ i = np.dtype(numpy_type).type()
+ assert_equal(
+ assert_warns(DeprecationWarning, operator.mul, seq, i),
+ seq * int(i))
+ assert_equal(
+ assert_warns(DeprecationWarning, operator.mul, i, seq),
+ int(i) * seq)
+
+ for numpy_type in forbidden_types:
+ i = np.dtype(numpy_type).type()
+ assert_raises(TypeError, operator.mul, seq, i)
+ assert_raises(TypeError, operator.mul, i, seq)
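+        # For example (editor's note): [1, 2, 3] * np.int64(2) repeats the
+        # list just like multiplying by a Python int, whereas
+        # [1, 2, 3] * np.float64(2.0) raises TypeError.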
+
+ def test_no_seq_repeat_basic_array_like(self):
+ # Test that an array-like which does not know how to be multiplied
+ # does not attempt sequence repeat (raise TypeError).
+ # See also gh-7428.
+ class ArrayLike(object):
+ def __init__(self, arr):
+ self.arr = arr
+ def __array__(self):
+ return self.arr
+
+ # Test for simple ArrayLike above and memoryviews (original report)
+ for arr_like in (ArrayLike(np.ones(3)), memoryview(np.ones(3))):
+ assert_array_equal(arr_like * np.float32(3.), np.full(3, 3.))
+ assert_array_equal(np.float32(3.) * arr_like, np.full(3, 3.))
+ assert_array_equal(arr_like * np.int_(3), np.full(3, 3))
+ assert_array_equal(np.int_(3) * arr_like, np.full(3, 3))
+
+
+class TestNegative(object):
+ def test_exceptions(self):
+ a = np.ones((), dtype=np.bool_)[()]
+ assert_raises(TypeError, operator.neg, a)
+
+ def test_result(self):
+ types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning)
+ for dt in types:
+ a = np.ones((), dtype=dt)[()]
+ assert_equal(operator.neg(a) + a, 0)
+
+
+class TestSubtract(object):
+ def test_exceptions(self):
+ a = np.ones((), dtype=np.bool_)[()]
+ assert_raises(TypeError, operator.sub, a, a)
+
+ def test_result(self):
+ types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning)
+ for dt in types:
+ a = np.ones((), dtype=dt)[()]
+ assert_equal(operator.sub(a, a), 0)
+
+
+class TestAbs(object):
+ def _test_abs_func(self, absfunc):
+ for tp in floating_types + complex_floating_types:
+ x = tp(-1.5)
+ assert_equal(absfunc(x), 1.5)
+ x = tp(0.0)
+ res = absfunc(x)
+ # assert_equal() checks zero signedness
+ assert_equal(res, 0.0)
+ x = tp(-0.0)
+ res = absfunc(x)
+ assert_equal(res, 0.0)
+
+ x = tp(np.finfo(tp).max)
+ assert_equal(absfunc(x), x.real)
+
+ x = tp(np.finfo(tp).tiny)
+ assert_equal(absfunc(x), x.real)
+
+ x = tp(np.finfo(tp).min)
+ assert_equal(absfunc(x), -x.real)
+
+ def test_builtin_abs(self):
+ self._test_abs_func(abs)
+
+ def test_numpy_abs(self):
+ self._test_abs_func(np.abs)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarmath.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarmath.pyc
new file mode 100644
index 0000000..39484a4
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarmath.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarprint.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarprint.py
new file mode 100644
index 0000000..cde1355
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarprint.py
@@ -0,0 +1,326 @@
+# -*- coding: utf-8 -*-
+""" Test printing of scalar types.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import code, sys
+import platform
+import pytest
+
+from tempfile import TemporaryFile
+import numpy as np
+from numpy.testing import assert_, assert_equal, suppress_warnings
+
+class TestRealScalars(object):
+ def test_str(self):
+ svals = [0.0, -0.0, 1, -1, np.inf, -np.inf, np.nan]
+ styps = [np.float16, np.float32, np.float64, np.longdouble]
+ wanted = [
+ ['0.0', '0.0', '0.0', '0.0' ],
+ ['-0.0', '-0.0', '-0.0', '-0.0'],
+ ['1.0', '1.0', '1.0', '1.0' ],
+ ['-1.0', '-1.0', '-1.0', '-1.0'],
+ ['inf', 'inf', 'inf', 'inf' ],
+ ['-inf', '-inf', '-inf', '-inf'],
+ ['nan', 'nan', 'nan', 'nan']]
+
+ for wants, val in zip(wanted, svals):
+ for want, styp in zip(wants, styps):
+ msg = 'for str({}({}))'.format(np.dtype(styp).name, repr(val))
+ assert_equal(str(styp(val)), want, err_msg=msg)
+
+ def test_scalar_cutoffs(self):
+        # test that both the str and repr of np.float64 behave
+        # like python floats in python3. Note that in python2
+        # str truncates digits, but we do not do this
+ def check(v):
+ # we compare str to repr, to avoid python2 truncation behavior
+ assert_equal(str(np.float64(v)), repr(v))
+ assert_equal(repr(np.float64(v)), repr(v))
+
+ # check we use the same number of significant digits
+ check(1.12345678901234567890)
+ check(0.0112345678901234567890)
+
+ # check switch from scientific output to positional and back
+ check(1e-5)
+ check(1e-4)
+ check(1e15)
+ check(1e16)
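+        # (Editor's note: these cutoffs match the builtin float, where
+        # repr(1e15) == '1000000000000000.0' but repr(1e16) == '1e+16'.)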
+
+ def test_py2_float_print(self):
+ # gh-10753
+        # In python2, the python float type implements an obsolete method
+        # tp_print, which overrides tp_repr and tp_str when using "print" to
+        # output to a "real" file (i.e., not a StringIO). Make sure we don't
+ # inherit it.
+ x = np.double(0.1999999999999)
+ with TemporaryFile('r+t') as f:
+ print(x, file=f)
+ f.seek(0)
+ output = f.read()
+ assert_equal(output, str(x) + '\n')
+ # In python2 the value float('0.1999999999999') prints with reduced
+ # precision as '0.2', but we want numpy's np.double('0.1999999999999')
+ # to print the unique value, '0.1999999999999'.
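+        # Illustration (editor's note, hypothetical py2 session):
+        #   >>> print 0.1999999999999             # builtin float: '0.2'
+        #   >>> print np.double(0.1999999999999)  # numpy: '0.1999999999999'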
+
+ # gh-11031
+ # Only in the python2 interactive shell and when stdout is a "real"
+ # file, the output of the last command is printed to stdout without
+ # Py_PRINT_RAW (unlike the print statement) so `>>> x` and `>>> print
+ # x` are potentially different. Make sure they are the same. The only
+ # way I found to get prompt-like output is using an actual prompt from
+ # the 'code' module. Again, must use tempfile to get a "real" file.
+
+ # dummy user-input which enters one line and then ctrl-Ds.
+ def userinput():
+ yield 'np.sqrt(2)'
+ raise EOFError
+ gen = userinput()
+ input_func = lambda prompt="": next(gen)
+
+ with TemporaryFile('r+t') as fo, TemporaryFile('r+t') as fe:
+ orig_stdout, orig_stderr = sys.stdout, sys.stderr
+ sys.stdout, sys.stderr = fo, fe
+
+ # py2 code.interact sends irrelevant internal DeprecationWarnings
+ with suppress_warnings() as sup:
+ sup.filter(DeprecationWarning)
+ code.interact(local={'np': np}, readfunc=input_func, banner='')
+
+ sys.stdout, sys.stderr = orig_stdout, orig_stderr
+
+ fo.seek(0)
+ capture = fo.read().strip()
+
+ assert_equal(capture, repr(np.sqrt(2)))
+
+ def test_dragon4(self):
+ # these tests are adapted from Ryan Juckett's dragon4 implementation,
+ # see dragon4.c for details.
+
+ fpos32 = lambda x, **k: np.format_float_positional(np.float32(x), **k)
+ fsci32 = lambda x, **k: np.format_float_scientific(np.float32(x), **k)
+ fpos64 = lambda x, **k: np.format_float_positional(np.float64(x), **k)
+ fsci64 = lambda x, **k: np.format_float_scientific(np.float64(x), **k)
+
+ preckwd = lambda prec: {'unique': False, 'precision': prec}
+
+ assert_equal(fpos32('1.0'), "1.")
+ assert_equal(fsci32('1.0'), "1.e+00")
+ assert_equal(fpos32('10.234'), "10.234")
+ assert_equal(fpos32('-10.234'), "-10.234")
+ assert_equal(fsci32('10.234'), "1.0234e+01")
+ assert_equal(fsci32('-10.234'), "-1.0234e+01")
+ assert_equal(fpos32('1000.0'), "1000.")
+ assert_equal(fpos32('1.0', precision=0), "1.")
+ assert_equal(fsci32('1.0', precision=0), "1.e+00")
+ assert_equal(fpos32('10.234', precision=0), "10.")
+ assert_equal(fpos32('-10.234', precision=0), "-10.")
+ assert_equal(fsci32('10.234', precision=0), "1.e+01")
+ assert_equal(fsci32('-10.234', precision=0), "-1.e+01")
+ assert_equal(fpos32('10.234', precision=2), "10.23")
+ assert_equal(fsci32('-10.234', precision=2), "-1.02e+01")
+ assert_equal(fsci64('9.9999999999999995e-08', **preckwd(16)),
+ '9.9999999999999995e-08')
+ assert_equal(fsci64('9.8813129168249309e-324', **preckwd(16)),
+ '9.8813129168249309e-324')
+ assert_equal(fsci64('9.9999999999999694e-311', **preckwd(16)),
+ '9.9999999999999694e-311')
+
+
+ # test rounding
+ # 3.1415927410 is closest float32 to np.pi
+ assert_equal(fpos32('3.14159265358979323846', **preckwd(10)),
+ "3.1415927410")
+ assert_equal(fsci32('3.14159265358979323846', **preckwd(10)),
+ "3.1415927410e+00")
+ assert_equal(fpos64('3.14159265358979323846', **preckwd(10)),
+ "3.1415926536")
+ assert_equal(fsci64('3.14159265358979323846', **preckwd(10)),
+ "3.1415926536e+00")
+ # 299792448 is closest float32 to 299792458
+ assert_equal(fpos32('299792458.0', **preckwd(5)), "299792448.00000")
+ assert_equal(fsci32('299792458.0', **preckwd(5)), "2.99792e+08")
+ assert_equal(fpos64('299792458.0', **preckwd(5)), "299792458.00000")
+ assert_equal(fsci64('299792458.0', **preckwd(5)), "2.99792e+08")
+
+ assert_equal(fpos32('3.14159265358979323846', **preckwd(25)),
+ "3.1415927410125732421875000")
+ assert_equal(fpos64('3.14159265358979323846', **preckwd(50)),
+ "3.14159265358979311599796346854418516159057617187500")
+ assert_equal(fpos64('3.14159265358979323846'), "3.141592653589793")
+
+
+ # smallest numbers
+ assert_equal(fpos32(0.5**(126 + 23), unique=False, precision=149),
+ "0.00000000000000000000000000000000000000000000140129846432"
+ "4817070923729583289916131280261941876515771757068283889791"
+ "08268586060148663818836212158203125")
+ assert_equal(fpos64(0.5**(1022 + 52), unique=False, precision=1074),
+ "0.00000000000000000000000000000000000000000000000000000000"
+ "0000000000000000000000000000000000000000000000000000000000"
+ "0000000000000000000000000000000000000000000000000000000000"
+ "0000000000000000000000000000000000000000000000000000000000"
+ "0000000000000000000000000000000000000000000000000000000000"
+ "0000000000000000000000000000000000049406564584124654417656"
+ "8792868221372365059802614324764425585682500675507270208751"
+ "8652998363616359923797965646954457177309266567103559397963"
+ "9877479601078187812630071319031140452784581716784898210368"
+ "8718636056998730723050006387409153564984387312473397273169"
+ "6151400317153853980741262385655911710266585566867681870395"
+ "6031062493194527159149245532930545654440112748012970999954"
+ "1931989409080416563324524757147869014726780159355238611550"
+ "1348035264934720193790268107107491703332226844753335720832"
+ "4319360923828934583680601060115061698097530783422773183292"
+ "4790498252473077637592724787465608477820373446969953364701"
+ "7972677717585125660551199131504891101451037862738167250955"
+ "8373897335989936648099411642057026370902792427675445652290"
+ "87538682506419718265533447265625")
+
+ # largest numbers
+ assert_equal(fpos32(np.finfo(np.float32).max, **preckwd(0)),
+ "340282346638528859811704183484516925440.")
+ assert_equal(fpos64(np.finfo(np.float64).max, **preckwd(0)),
+ "1797693134862315708145274237317043567980705675258449965989"
+ "1747680315726078002853876058955863276687817154045895351438"
+ "2464234321326889464182768467546703537516986049910576551282"
+ "0762454900903893289440758685084551339423045832369032229481"
+ "6580855933212334827479782620414472316873817718091929988125"
+ "0404026184124858368.")
+ # Warning: In unique mode only the integer digits necessary for
+ # uniqueness are computed, the rest are 0. Should we change this?
+ assert_equal(fpos32(np.finfo(np.float32).max, precision=0),
+ "340282350000000000000000000000000000000.")
+
+ # test trailing zeros
+ assert_equal(fpos32('1.0', unique=False, precision=3), "1.000")
+ assert_equal(fpos64('1.0', unique=False, precision=3), "1.000")
+ assert_equal(fsci32('1.0', unique=False, precision=3), "1.000e+00")
+ assert_equal(fsci64('1.0', unique=False, precision=3), "1.000e+00")
+ assert_equal(fpos32('1.5', unique=False, precision=3), "1.500")
+ assert_equal(fpos64('1.5', unique=False, precision=3), "1.500")
+ assert_equal(fsci32('1.5', unique=False, precision=3), "1.500e+00")
+ assert_equal(fsci64('1.5', unique=False, precision=3), "1.500e+00")
+ # gh-10713
+ assert_equal(fpos64('324', unique=False, precision=5, fractional=False), "324.00")
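+        # Quick reference (editor's note): in the default unique mode
+        # np.format_float_positional prints the shortest round-tripping
+        # string, e.g. '0.1' for np.float32(0.1), whereas unique=False with
+        # precision=10 pads/rounds to '0.1000000015'.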
+
+ def test_dragon4_interface(self):
+ tps = [np.float16, np.float32, np.float64]
+ if hasattr(np, 'float128'):
+ tps.append(np.float128)
+
+ fpos = np.format_float_positional
+ fsci = np.format_float_scientific
+
+ for tp in tps:
+ # test padding
+ assert_equal(fpos(tp('1.0'), pad_left=4, pad_right=4), " 1. ")
+ assert_equal(fpos(tp('-1.0'), pad_left=4, pad_right=4), " -1. ")
+ assert_equal(fpos(tp('-10.2'),
+ pad_left=4, pad_right=4), " -10.2 ")
+
+ # test exp_digits
+ assert_equal(fsci(tp('1.23e1'), exp_digits=5), "1.23e+00001")
+
+ # test fixed (non-unique) mode
+ assert_equal(fpos(tp('1.0'), unique=False, precision=4), "1.0000")
+ assert_equal(fsci(tp('1.0'), unique=False, precision=4),
+ "1.0000e+00")
+
+ # test trimming
+ # trim of 'k' or '.' only affects non-unique mode, since unique
+ # mode will not output trailing 0s.
+ assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='k'),
+ "1.0000")
+
+ assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='.'),
+ "1.")
+ assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='.'),
+ "1.2" if tp != np.float16 else "1.2002")
+
+ assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='0'),
+ "1.0")
+ assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='0'),
+ "1.2" if tp != np.float16 else "1.2002")
+ assert_equal(fpos(tp('1.'), trim='0'), "1.0")
+
+ assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='-'),
+ "1")
+ assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='-'),
+ "1.2" if tp != np.float16 else "1.2002")
+ assert_equal(fpos(tp('1.'), trim='-'), "1")
+
+ @pytest.mark.skipif(not platform.machine().startswith("ppc64"),
+ reason="only applies to ppc float128 values")
+ def test_ppc64_ibm_double_double128(self):
+        # Check that the precision decreases once we get into the subnormal
+        # range. Unlike float64, this starts around 1e-292 instead of 1e-308,
+        # because the first double of the pair is still normal while the
+        # second is subnormal.
+ x = np.float128('2.123123123123123123123123123123123e-286')
+ got = [str(x/np.float128('2e' + str(i))) for i in range(0,40)]
+ expected = [
+ "1.06156156156156156156156156156157e-286",
+ "1.06156156156156156156156156156158e-287",
+ "1.06156156156156156156156156156159e-288",
+ "1.0615615615615615615615615615616e-289",
+ "1.06156156156156156156156156156157e-290",
+ "1.06156156156156156156156156156156e-291",
+ "1.0615615615615615615615615615616e-292",
+ "1.0615615615615615615615615615615e-293",
+ "1.061561561561561561561561561562e-294",
+ "1.06156156156156156156156156155e-295",
+ "1.0615615615615615615615615616e-296",
+ "1.06156156156156156156156156e-297",
+ "1.06156156156156156156156157e-298",
+ "1.0615615615615615615615616e-299",
+ "1.06156156156156156156156e-300",
+ "1.06156156156156156156155e-301",
+ "1.0615615615615615615616e-302",
+ "1.061561561561561561562e-303",
+ "1.06156156156156156156e-304",
+ "1.0615615615615615618e-305",
+ "1.06156156156156156e-306",
+ "1.06156156156156157e-307",
+ "1.0615615615615616e-308",
+ "1.06156156156156e-309",
+ "1.06156156156157e-310",
+ "1.0615615615616e-311",
+ "1.06156156156e-312",
+ "1.06156156154e-313",
+ "1.0615615616e-314",
+ "1.06156156e-315",
+ "1.06156155e-316",
+ "1.061562e-317",
+ "1.06156e-318",
+ "1.06155e-319",
+ "1.0617e-320",
+ "1.06e-321",
+ "1.04e-322",
+ "1e-323",
+ "0.0",
+ "0.0"]
+ assert_equal(got, expected)
+
+ # Note: we follow glibc behavior, but it (or gcc) might not be right.
+ # In particular we can get two values that print the same but are not
+ # equal:
+ a = np.float128('2')/np.float128('3')
+ b = np.float128(str(a))
+ assert_equal(str(a), str(b))
+ assert_(a != b)
+
+    def test_float32_roundtrip(self):
+ # gh-9360
+ x = np.float32(1024 - 2**-14)
+ y = np.float32(1024 - 2**-13)
+ assert_(repr(x) != repr(y))
+ assert_equal(np.float32(repr(x)), x)
+ assert_equal(np.float32(repr(y)), y)
+
+    def test_float64_vs_python(self):
+ # gh-2643, gh-6136, gh-6908
+ assert_equal(repr(np.float64(0.1)), repr(0.1))
+ assert_(repr(np.float64(0.20000000000000004)) != repr(0.2))
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarprint.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarprint.pyc
new file mode 100644
index 0000000..c2b6ac7
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_scalarprint.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_shape_base.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_shape_base.py
new file mode 100644
index 0000000..b996321
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_shape_base.py
@@ -0,0 +1,706 @@
+from __future__ import division, absolute_import, print_function
+
+import pytest
+import sys
+import numpy as np
+from numpy.core import (
+ array, arange, atleast_1d, atleast_2d, atleast_3d, block, vstack, hstack,
+ newaxis, concatenate, stack
+ )
+from numpy.core.shape_base import (_block_dispatcher, _block_setup,
+ _block_concatenate, _block_slicing)
+from numpy.testing import (
+ assert_, assert_raises, assert_array_equal, assert_equal,
+ assert_raises_regex, assert_warns
+ )
+
+from numpy.compat import long
+
+class TestAtleast1d(object):
+ def test_0D_array(self):
+ a = array(1)
+ b = array(2)
+ res = [atleast_1d(a), atleast_1d(b)]
+ desired = [array([1]), array([2])]
+ assert_array_equal(res, desired)
+
+ def test_1D_array(self):
+ a = array([1, 2])
+ b = array([2, 3])
+ res = [atleast_1d(a), atleast_1d(b)]
+ desired = [array([1, 2]), array([2, 3])]
+ assert_array_equal(res, desired)
+
+ def test_2D_array(self):
+ a = array([[1, 2], [1, 2]])
+ b = array([[2, 3], [2, 3]])
+ res = [atleast_1d(a), atleast_1d(b)]
+ desired = [a, b]
+ assert_array_equal(res, desired)
+
+ def test_3D_array(self):
+ a = array([[1, 2], [1, 2]])
+ b = array([[2, 3], [2, 3]])
+ a = array([a, a])
+ b = array([b, b])
+ res = [atleast_1d(a), atleast_1d(b)]
+ desired = [a, b]
+ assert_array_equal(res, desired)
+
+ def test_r1array(self):
+        """ Test to make sure it is equivalent to Travis O's r1array function
+        """
+ assert_(atleast_1d(3).shape == (1,))
+ assert_(atleast_1d(3j).shape == (1,))
+ assert_(atleast_1d(long(3)).shape == (1,))
+ assert_(atleast_1d(3.0).shape == (1,))
+ assert_(atleast_1d([[2, 3], [4, 5]]).shape == (2, 2))
+
+
+class TestAtleast2d(object):
+ def test_0D_array(self):
+ a = array(1)
+ b = array(2)
+ res = [atleast_2d(a), atleast_2d(b)]
+ desired = [array([[1]]), array([[2]])]
+ assert_array_equal(res, desired)
+
+ def test_1D_array(self):
+ a = array([1, 2])
+ b = array([2, 3])
+ res = [atleast_2d(a), atleast_2d(b)]
+ desired = [array([[1, 2]]), array([[2, 3]])]
+ assert_array_equal(res, desired)
+
+ def test_2D_array(self):
+ a = array([[1, 2], [1, 2]])
+ b = array([[2, 3], [2, 3]])
+ res = [atleast_2d(a), atleast_2d(b)]
+ desired = [a, b]
+ assert_array_equal(res, desired)
+
+ def test_3D_array(self):
+ a = array([[1, 2], [1, 2]])
+ b = array([[2, 3], [2, 3]])
+ a = array([a, a])
+ b = array([b, b])
+ res = [atleast_2d(a), atleast_2d(b)]
+ desired = [a, b]
+ assert_array_equal(res, desired)
+
+ def test_r2array(self):
+        """ Test to make sure it is equivalent to Travis O's r2array function
+        """
+ assert_(atleast_2d(3).shape == (1, 1))
+ assert_(atleast_2d([3j, 1]).shape == (1, 2))
+ assert_(atleast_2d([[[3, 1], [4, 5]], [[3, 5], [1, 2]]]).shape == (2, 2, 2))
+
+
+class TestAtleast3d(object):
+ def test_0D_array(self):
+ a = array(1)
+ b = array(2)
+ res = [atleast_3d(a), atleast_3d(b)]
+ desired = [array([[[1]]]), array([[[2]]])]
+ assert_array_equal(res, desired)
+
+ def test_1D_array(self):
+ a = array([1, 2])
+ b = array([2, 3])
+ res = [atleast_3d(a), atleast_3d(b)]
+ desired = [array([[[1], [2]]]), array([[[2], [3]]])]
+ assert_array_equal(res, desired)
+
+ def test_2D_array(self):
+ a = array([[1, 2], [1, 2]])
+ b = array([[2, 3], [2, 3]])
+ res = [atleast_3d(a), atleast_3d(b)]
+ desired = [a[:,:, newaxis], b[:,:, newaxis]]
+ assert_array_equal(res, desired)
+
+ def test_3D_array(self):
+ a = array([[1, 2], [1, 2]])
+ b = array([[2, 3], [2, 3]])
+ a = array([a, a])
+ b = array([b, b])
+ res = [atleast_3d(a), atleast_3d(b)]
+ desired = [a, b]
+ assert_array_equal(res, desired)
+
+
+class TestHstack(object):
+ def test_non_iterable(self):
+ assert_raises(TypeError, hstack, 1)
+
+ def test_empty_input(self):
+ assert_raises(ValueError, hstack, ())
+
+ def test_0D_array(self):
+ a = array(1)
+ b = array(2)
+ res = hstack([a, b])
+ desired = array([1, 2])
+ assert_array_equal(res, desired)
+
+ def test_1D_array(self):
+ a = array([1])
+ b = array([2])
+ res = hstack([a, b])
+ desired = array([1, 2])
+ assert_array_equal(res, desired)
+
+ def test_2D_array(self):
+ a = array([[1], [2]])
+ b = array([[1], [2]])
+ res = hstack([a, b])
+ desired = array([[1, 1], [2, 2]])
+ assert_array_equal(res, desired)
+
+ def test_generator(self):
+ with assert_warns(FutureWarning):
+ hstack((np.arange(3) for _ in range(2)))
+ if sys.version_info.major > 2:
+ # map returns a list on Python 2
+ with assert_warns(FutureWarning):
+ hstack(map(lambda x: x, np.ones((3, 2))))
+
+
+class TestVstack(object):
+ def test_non_iterable(self):
+ assert_raises(TypeError, vstack, 1)
+
+ def test_empty_input(self):
+ assert_raises(ValueError, vstack, ())
+
+ def test_0D_array(self):
+ a = array(1)
+ b = array(2)
+ res = vstack([a, b])
+ desired = array([[1], [2]])
+ assert_array_equal(res, desired)
+
+ def test_1D_array(self):
+ a = array([1])
+ b = array([2])
+ res = vstack([a, b])
+ desired = array([[1], [2]])
+ assert_array_equal(res, desired)
+
+ def test_2D_array(self):
+ a = array([[1], [2]])
+ b = array([[1], [2]])
+ res = vstack([a, b])
+ desired = array([[1], [2], [1], [2]])
+ assert_array_equal(res, desired)
+
+ def test_2D_array2(self):
+ a = array([1, 2])
+ b = array([1, 2])
+ res = vstack([a, b])
+ desired = array([[1, 2], [1, 2]])
+ assert_array_equal(res, desired)
+
+ def test_generator(self):
+ with assert_warns(FutureWarning):
+ vstack((np.arange(3) for _ in range(2)))
+
+
+class TestConcatenate(object):
+ def test_returns_copy(self):
+ a = np.eye(3)
+ b = np.concatenate([a])
+ b[0, 0] = 2
+ assert b[0, 0] != a[0, 0]
+
+ def test_exceptions(self):
+ # test axis must be in bounds
+ for ndim in [1, 2, 3]:
+ a = np.ones((1,)*ndim)
+ np.concatenate((a, a), axis=0) # OK
+ assert_raises(np.AxisError, np.concatenate, (a, a), axis=ndim)
+ assert_raises(np.AxisError, np.concatenate, (a, a), axis=-(ndim + 1))
+
+ # Scalars cannot be concatenated
+ assert_raises(ValueError, concatenate, (0,))
+ assert_raises(ValueError, concatenate, (np.array(0),))
+
+ # test shapes must match except for concatenation axis
+ a = np.ones((1, 2, 3))
+ b = np.ones((2, 2, 3))
+ axis = list(range(3))
+ for i in range(3):
+ np.concatenate((a, b), axis=axis[0]) # OK
+ assert_raises(ValueError, np.concatenate, (a, b), axis=axis[1])
+ assert_raises(ValueError, np.concatenate, (a, b), axis=axis[2])
+ a = np.moveaxis(a, -1, 0)
+ b = np.moveaxis(b, -1, 0)
+ axis.append(axis.pop(0))
+
+ # No arrays to concatenate raises ValueError
+ assert_raises(ValueError, concatenate, ())
+
+ def test_concatenate_axis_None(self):
+ a = np.arange(4, dtype=np.float64).reshape((2, 2))
+ b = list(range(3))
+ c = ['x']
+ r = np.concatenate((a, a), axis=None)
+ assert_equal(r.dtype, a.dtype)
+ assert_equal(r.ndim, 1)
+ r = np.concatenate((a, b), axis=None)
+ assert_equal(r.size, a.size + len(b))
+ assert_equal(r.dtype, a.dtype)
+ r = np.concatenate((a, b, c), axis=None)
+ d = array(['0.0', '1.0', '2.0', '3.0',
+ '0', '1', '2', 'x'])
+ assert_array_equal(r, d)
+
+ out = np.zeros(a.size + len(b))
+ r = np.concatenate((a, b), axis=None)
+ rout = np.concatenate((a, b), axis=None, out=out)
+ assert_(out is rout)
+ assert_equal(r, rout)
+
+ def test_large_concatenate_axis_None(self):
+ # When no axis is given, concatenate uses flattened versions.
+ # This also had a bug with many arrays (see gh-5979).
+ x = np.arange(1, 100)
+ r = np.concatenate(x, None)
+ assert_array_equal(x, r)
+
+ # This should probably be deprecated:
+ r = np.concatenate(x, 100) # axis is >= MAXDIMS
+ assert_array_equal(x, r)
+
+ def test_concatenate(self):
+ # Test concatenate function
+ # One sequence returns unmodified (but as array)
+ r4 = list(range(4))
+ assert_array_equal(concatenate((r4,)), r4)
+ # Any sequence
+ assert_array_equal(concatenate((tuple(r4),)), r4)
+ assert_array_equal(concatenate((array(r4),)), r4)
+ # 1D default concatenation
+ r3 = list(range(3))
+ assert_array_equal(concatenate((r4, r3)), r4 + r3)
+ # Mixed sequence types
+ assert_array_equal(concatenate((tuple(r4), r3)), r4 + r3)
+ assert_array_equal(concatenate((array(r4), r3)), r4 + r3)
+ # Explicit axis specification
+ assert_array_equal(concatenate((r4, r3), 0), r4 + r3)
+ # Including negative
+ assert_array_equal(concatenate((r4, r3), -1), r4 + r3)
+ # 2D
+ a23 = array([[10, 11, 12], [13, 14, 15]])
+ a13 = array([[0, 1, 2]])
+ res = array([[10, 11, 12], [13, 14, 15], [0, 1, 2]])
+ assert_array_equal(concatenate((a23, a13)), res)
+ assert_array_equal(concatenate((a23, a13), 0), res)
+ assert_array_equal(concatenate((a23.T, a13.T), 1), res.T)
+ assert_array_equal(concatenate((a23.T, a13.T), -1), res.T)
+        # Arrays must match shape
+ assert_raises(ValueError, concatenate, (a23.T, a13.T), 0)
+ # 3D
+ res = arange(2 * 3 * 7).reshape((2, 3, 7))
+ a0 = res[..., :4]
+ a1 = res[..., 4:6]
+ a2 = res[..., 6:]
+ assert_array_equal(concatenate((a0, a1, a2), 2), res)
+ assert_array_equal(concatenate((a0, a1, a2), -1), res)
+ assert_array_equal(concatenate((a0.T, a1.T, a2.T), 0), res.T)
+
+ out = res.copy()
+ rout = concatenate((a0, a1, a2), 2, out=out)
+ assert_(out is rout)
+ assert_equal(res, rout)
+
+ def test_bad_out_shape(self):
+ a = array([1, 2])
+ b = array([3, 4])
+
+ assert_raises(ValueError, concatenate, (a, b), out=np.empty(5))
+ assert_raises(ValueError, concatenate, (a, b), out=np.empty((4,1)))
+ assert_raises(ValueError, concatenate, (a, b), out=np.empty((1,4)))
+ concatenate((a, b), out=np.empty(4))
+
+ def test_out_dtype(self):
+ out = np.empty(4, np.float32)
+ res = concatenate((array([1, 2]), array([3, 4])), out=out)
+ assert_(out is res)
+
+ out = np.empty(4, np.complex64)
+ res = concatenate((array([0.1, 0.2]), array([0.3, 0.4])), out=out)
+ assert_(out is res)
+
+ # invalid cast
+ out = np.empty(4, np.int32)
+ assert_raises(TypeError, concatenate,
+ (array([0.1, 0.2]), array([0.3, 0.4])), out=out)
+
+
+def test_stack():
+ # non-iterable input
+ assert_raises(TypeError, stack, 1)
+
+ # 0d input
+ for input_ in [(1, 2, 3),
+ [np.int32(1), np.int32(2), np.int32(3)],
+ [np.array(1), np.array(2), np.array(3)]]:
+ assert_array_equal(stack(input_), [1, 2, 3])
+ # 1d input examples
+ a = np.array([1, 2, 3])
+ b = np.array([4, 5, 6])
+ r1 = array([[1, 2, 3], [4, 5, 6]])
+ assert_array_equal(np.stack((a, b)), r1)
+ assert_array_equal(np.stack((a, b), axis=1), r1.T)
+ # all input types
+ assert_array_equal(np.stack(list([a, b])), r1)
+ assert_array_equal(np.stack(array([a, b])), r1)
+ # all shapes for 1d input
+ arrays = [np.random.randn(3) for _ in range(10)]
+ axes = [0, 1, -1, -2]
+ expected_shapes = [(10, 3), (3, 10), (3, 10), (10, 3)]
+ for axis, expected_shape in zip(axes, expected_shapes):
+ assert_equal(np.stack(arrays, axis).shape, expected_shape)
+ assert_raises_regex(np.AxisError, 'out of bounds', stack, arrays, axis=2)
+ assert_raises_regex(np.AxisError, 'out of bounds', stack, arrays, axis=-3)
+ # all shapes for 2d input
+ arrays = [np.random.randn(3, 4) for _ in range(10)]
+ axes = [0, 1, 2, -1, -2, -3]
+ expected_shapes = [(10, 3, 4), (3, 10, 4), (3, 4, 10),
+ (3, 4, 10), (3, 10, 4), (10, 3, 4)]
+ for axis, expected_shape in zip(axes, expected_shapes):
+ assert_equal(np.stack(arrays, axis).shape, expected_shape)
+ # empty arrays
+ assert_(stack([[], [], []]).shape == (3, 0))
+ assert_(stack([[], [], []], axis=1).shape == (0, 3))
+ # out
+ out = np.zeros_like(r1)
+ np.stack((a, b), out=out)
+ assert_array_equal(out, r1)
+ # edge cases
+ assert_raises_regex(ValueError, 'need at least one array', stack, [])
+ assert_raises_regex(ValueError, 'must have the same shape',
+ stack, [1, np.arange(3)])
+ assert_raises_regex(ValueError, 'must have the same shape',
+ stack, [np.arange(3), 1])
+ assert_raises_regex(ValueError, 'must have the same shape',
+ stack, [np.arange(3), 1], axis=1)
+ assert_raises_regex(ValueError, 'must have the same shape',
+ stack, [np.zeros((3, 3)), np.zeros(3)], axis=1)
+ assert_raises_regex(ValueError, 'must have the same shape',
+ stack, [np.arange(2), np.arange(3)])
+ # generator is deprecated
+ with assert_warns(FutureWarning):
+ result = stack((x for x in range(3)))
+ assert_array_equal(result, np.array([0, 1, 2]))
+
+
+class TestBlock(object):
+ @pytest.fixture(params=['block', 'force_concatenate', 'force_slicing'])
+ def block(self, request):
+        # Blocking small arrays and large arrays goes through different
+        # paths; which path is taken depends on the number of element
+        # copies required. This fixture forces most tests to go through
+        # both code paths. Ultimately, it should be removed if a single
+        # algorithm is found to be faster for both small and large arrays.
+ def _block_force_concatenate(arrays):
+ arrays, list_ndim, result_ndim, _ = _block_setup(arrays)
+ return _block_concatenate(arrays, list_ndim, result_ndim)
+
+ def _block_force_slicing(arrays):
+ arrays, list_ndim, result_ndim, _ = _block_setup(arrays)
+ return _block_slicing(arrays, list_ndim, result_ndim)
+
+ if request.param == 'force_concatenate':
+ return _block_force_concatenate
+ elif request.param == 'force_slicing':
+ return _block_force_slicing
+ elif request.param == 'block':
+ return block
+ else:
+ raise ValueError('Unknown blocking request. There is a typo in the tests.')
+
+ def test_returns_copy(self, block):
+ a = np.eye(3)
+ b = block(a)
+ b[0, 0] = 2
+ assert b[0, 0] != a[0, 0]
+
+ def test_block_total_size_estimate(self, block):
+ _, _, _, total_size = _block_setup([1])
+ assert total_size == 1
+
+ _, _, _, total_size = _block_setup([[1]])
+ assert total_size == 1
+
+ _, _, _, total_size = _block_setup([[1, 1]])
+ assert total_size == 2
+
+ _, _, _, total_size = _block_setup([[1], [1]])
+ assert total_size == 2
+
+ _, _, _, total_size = _block_setup([[1, 2], [3, 4]])
+ assert total_size == 4
+
+ def test_block_simple_row_wise(self, block):
+ a_2d = np.ones((2, 2))
+ b_2d = 2 * a_2d
+ desired = np.array([[1, 1, 2, 2],
+ [1, 1, 2, 2]])
+ result = block([a_2d, b_2d])
+ assert_equal(desired, result)
+
+ def test_block_simple_column_wise(self, block):
+ a_2d = np.ones((2, 2))
+ b_2d = 2 * a_2d
+ expected = np.array([[1, 1],
+ [1, 1],
+ [2, 2],
+ [2, 2]])
+ result = block([[a_2d], [b_2d]])
+ assert_equal(expected, result)
+
+ def test_block_with_1d_arrays_row_wise(self, block):
+        # 1-D vectors are treated as row arrays
+ a = np.array([1, 2, 3])
+ b = np.array([2, 3, 4])
+ expected = np.array([1, 2, 3, 2, 3, 4])
+ result = block([a, b])
+ assert_equal(expected, result)
+
+ def test_block_with_1d_arrays_multiple_rows(self, block):
+ a = np.array([1, 2, 3])
+ b = np.array([2, 3, 4])
+ expected = np.array([[1, 2, 3, 2, 3, 4],
+ [1, 2, 3, 2, 3, 4]])
+ result = block([[a, b], [a, b]])
+ assert_equal(expected, result)
+
+ def test_block_with_1d_arrays_column_wise(self, block):
+        # 1-D vectors are treated as row arrays
+ a_1d = np.array([1, 2, 3])
+ b_1d = np.array([2, 3, 4])
+ expected = np.array([[1, 2, 3],
+ [2, 3, 4]])
+ result = block([[a_1d], [b_1d]])
+ assert_equal(expected, result)
+
+ def test_block_mixed_1d_and_2d(self, block):
+ a_2d = np.ones((2, 2))
+ b_1d = np.array([2, 2])
+ result = block([[a_2d], [b_1d]])
+ expected = np.array([[1, 1],
+ [1, 1],
+ [2, 2]])
+ assert_equal(expected, result)
+
+ def test_block_complicated(self, block):
+ # a bit more complicated
+ one_2d = np.array([[1, 1, 1]])
+ two_2d = np.array([[2, 2, 2]])
+ three_2d = np.array([[3, 3, 3, 3, 3, 3]])
+ four_1d = np.array([4, 4, 4, 4, 4, 4])
+ five_0d = np.array(5)
+ six_1d = np.array([6, 6, 6, 6, 6])
+ zero_2d = np.zeros((2, 6))
+
+ expected = np.array([[1, 1, 1, 2, 2, 2],
+ [3, 3, 3, 3, 3, 3],
+ [4, 4, 4, 4, 4, 4],
+ [5, 6, 6, 6, 6, 6],
+ [0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0]])
+
+ result = block([[one_2d, two_2d],
+ [three_2d],
+ [four_1d],
+ [five_0d, six_1d],
+ [zero_2d]])
+ assert_equal(result, expected)
+
+ def test_nested(self, block):
+ one = np.array([1, 1, 1])
+ two = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]])
+ three = np.array([3, 3, 3])
+ four = np.array([4, 4, 4])
+ five = np.array(5)
+ six = np.array([6, 6, 6, 6, 6])
+ zero = np.zeros((2, 6))
+
+ result = block([
+ [
+ block([
+ [one],
+ [three],
+ [four]
+ ]),
+ two
+ ],
+ [five, six],
+ [zero]
+ ])
+ expected = np.array([[1, 1, 1, 2, 2, 2],
+ [3, 3, 3, 2, 2, 2],
+ [4, 4, 4, 2, 2, 2],
+ [5, 6, 6, 6, 6, 6],
+ [0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0]])
+
+ assert_equal(result, expected)
+
+ def test_3d(self, block):
+ a000 = np.ones((2, 2, 2), int) * 1
+
+ a100 = np.ones((3, 2, 2), int) * 2
+ a010 = np.ones((2, 3, 2), int) * 3
+ a001 = np.ones((2, 2, 3), int) * 4
+
+ a011 = np.ones((2, 3, 3), int) * 5
+ a101 = np.ones((3, 2, 3), int) * 6
+ a110 = np.ones((3, 3, 2), int) * 7
+
+ a111 = np.ones((3, 3, 3), int) * 8
+
+ result = block([
+ [
+ [a000, a001],
+ [a010, a011],
+ ],
+ [
+ [a100, a101],
+ [a110, a111],
+ ]
+ ])
+ expected = array([[[1, 1, 4, 4, 4],
+ [1, 1, 4, 4, 4],
+ [3, 3, 5, 5, 5],
+ [3, 3, 5, 5, 5],
+ [3, 3, 5, 5, 5]],
+
+ [[1, 1, 4, 4, 4],
+ [1, 1, 4, 4, 4],
+ [3, 3, 5, 5, 5],
+ [3, 3, 5, 5, 5],
+ [3, 3, 5, 5, 5]],
+
+ [[2, 2, 6, 6, 6],
+ [2, 2, 6, 6, 6],
+ [7, 7, 8, 8, 8],
+ [7, 7, 8, 8, 8],
+ [7, 7, 8, 8, 8]],
+
+ [[2, 2, 6, 6, 6],
+ [2, 2, 6, 6, 6],
+ [7, 7, 8, 8, 8],
+ [7, 7, 8, 8, 8],
+ [7, 7, 8, 8, 8]],
+
+ [[2, 2, 6, 6, 6],
+ [2, 2, 6, 6, 6],
+ [7, 7, 8, 8, 8],
+ [7, 7, 8, 8, 8],
+ [7, 7, 8, 8, 8]]])
+
+ assert_array_equal(result, expected)
+
+ def test_block_with_mismatched_shape(self, block):
+ a = np.array([0, 0])
+ b = np.eye(2)
+ assert_raises(ValueError, block, [a, b])
+ assert_raises(ValueError, block, [b, a])
+
+ to_block = [[np.ones((2,3)), np.ones((2,2))],
+ [np.ones((2,2)), np.ones((2,2))]]
+        assert_raises(ValueError, block, to_block)
+
+    def test_no_lists(self, block):
+ assert_equal(block(1), np.array(1))
+ assert_equal(block(np.eye(3)), np.eye(3))
+
+ def test_invalid_nesting(self, block):
+ msg = 'depths are mismatched'
+ assert_raises_regex(ValueError, msg, block, [1, [2]])
+ assert_raises_regex(ValueError, msg, block, [1, []])
+ assert_raises_regex(ValueError, msg, block, [[1], 2])
+ assert_raises_regex(ValueError, msg, block, [[], 2])
+ assert_raises_regex(ValueError, msg, block, [
+ [[1], [2]],
+ [[3, 4]],
+ [5] # missing brackets
+ ])
+
+ def test_empty_lists(self, block):
+ assert_raises_regex(ValueError, 'empty', block, [])
+ assert_raises_regex(ValueError, 'empty', block, [[]])
+ assert_raises_regex(ValueError, 'empty', block, [[1], []])
+
+ def test_tuple(self, block):
+ assert_raises_regex(TypeError, 'tuple', block, ([1, 2], [3, 4]))
+ assert_raises_regex(TypeError, 'tuple', block, [(1, 2), (3, 4)])
+
+ def test_different_ndims(self, block):
+ a = 1.
+ b = 2 * np.ones((1, 2))
+ c = 3 * np.ones((1, 1, 3))
+
+ result = block([a, b, c])
+ expected = np.array([[[1., 2., 2., 3., 3., 3.]]])
+
+ assert_equal(result, expected)
+
+ def test_different_ndims_depths(self, block):
+ a = 1.
+ b = 2 * np.ones((1, 2))
+ c = 3 * np.ones((1, 2, 3))
+
+ result = block([[a, b], [c]])
+ expected = np.array([[[1., 2., 2.],
+ [3., 3., 3.],
+ [3., 3., 3.]]])
+
+ assert_equal(result, expected)
+
+ def test_block_memory_order(self, block):
+ # 3D
+ arr_c = np.zeros((3,)*3, order='C')
+ arr_f = np.zeros((3,)*3, order='F')
+
+ b_c = [[[arr_c, arr_c],
+ [arr_c, arr_c]],
+ [[arr_c, arr_c],
+ [arr_c, arr_c]]]
+
+ b_f = [[[arr_f, arr_f],
+ [arr_f, arr_f]],
+ [[arr_f, arr_f],
+ [arr_f, arr_f]]]
+
+ assert block(b_c).flags['C_CONTIGUOUS']
+ assert block(b_f).flags['F_CONTIGUOUS']
+
+ arr_c = np.zeros((3, 3), order='C')
+ arr_f = np.zeros((3, 3), order='F')
+ # 2D
+ b_c = [[arr_c, arr_c],
+ [arr_c, arr_c]]
+
+ b_f = [[arr_f, arr_f],
+ [arr_f, arr_f]]
+
+ assert block(b_c).flags['C_CONTIGUOUS']
+ assert block(b_f).flags['F_CONTIGUOUS']
+
+
+def test_block_dispatcher():
+ class ArrayLike(object):
+ pass
+ a = ArrayLike()
+ b = ArrayLike()
+ c = ArrayLike()
+ assert_equal(list(_block_dispatcher(a)), [a])
+ assert_equal(list(_block_dispatcher([a])), [a])
+ assert_equal(list(_block_dispatcher([a, b])), [a, b])
+ assert_equal(list(_block_dispatcher([[a], [b, [c]]])), [a, b, c])
+ # don't recurse into non-lists
+ assert_equal(list(_block_dispatcher((a, b))), [(a, b)])
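+    # i.e. (editor's note) np.block recurses only into plain Python lists;
+    # a tuple like (a, b) is treated as a single array-like leaf, which is
+    # why TestBlock.test_tuple above rejects tuples with a TypeError.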
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_shape_base.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_shape_base.pyc
new file mode 100644
index 0000000..3dbb3af
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_shape_base.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_ufunc.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_ufunc.py
new file mode 100644
index 0000000..b83b8cc
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_ufunc.py
@@ -0,0 +1,1859 @@
+from __future__ import division, absolute_import, print_function
+
+import warnings
+import itertools
+
+import numpy as np
+import numpy.core._umath_tests as umt
+import numpy.linalg._umath_linalg as uml
+import numpy.core._operand_flag_tests as opflag_tests
+import numpy.core._rational_tests as _rational_tests
+from numpy.testing import (
+ assert_, assert_equal, assert_raises, assert_array_equal,
+ assert_almost_equal, assert_array_almost_equal, assert_no_warnings,
+ assert_allclose,
+ )
+from numpy.core.numeric import pickle
+
+
+class TestUfuncKwargs(object):
+ def test_kwarg_exact(self):
+ assert_raises(TypeError, np.add, 1, 2, castingx='safe')
+ assert_raises(TypeError, np.add, 1, 2, dtypex=int)
+ assert_raises(TypeError, np.add, 1, 2, extobjx=[4096])
+ assert_raises(TypeError, np.add, 1, 2, outx=None)
+ assert_raises(TypeError, np.add, 1, 2, sigx='ii->i')
+ assert_raises(TypeError, np.add, 1, 2, signaturex='ii->i')
+ assert_raises(TypeError, np.add, 1, 2, subokx=False)
+ assert_raises(TypeError, np.add, 1, 2, wherex=[True])
+
+ def test_sig_signature(self):
+ assert_raises(ValueError, np.add, 1, 2, sig='ii->i',
+ signature='ii->i')
+
+ def test_sig_dtype(self):
+ assert_raises(RuntimeError, np.add, 1, 2, sig='ii->i',
+ dtype=int)
+ assert_raises(RuntimeError, np.add, 1, 2, signature='ii->i',
+ dtype=int)
+
+ def test_extobj_refcount(self):
+ # Should not segfault with USE_DEBUG.
+ assert_raises(TypeError, np.add, 1, 2, extobj=[4096], parrot=True)
+
+
+class TestUfunc(object):
+ def test_pickle(self):
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ assert_(pickle.loads(pickle.dumps(np.sin,
+ protocol=proto)) is np.sin)
+
+ # Check that ufunc not defined in the top level numpy namespace
+ # such as numpy.core._rational_tests.test_add can also be pickled
+ res = pickle.loads(pickle.dumps(_rational_tests.test_add,
+ protocol=proto))
+ assert_(res is _rational_tests.test_add)
+
+ def test_pickle_withstring(self):
+ astring = (b"cnumpy.core\n_ufunc_reconstruct\np0\n"
+ b"(S'numpy.core.umath'\np1\nS'cos'\np2\ntp3\nRp4\n.")
+ assert_(pickle.loads(astring) is np.cos)
+
+ def test_reduceat_shifting_sum(self):
+ L = 6
+ x = np.arange(L)
+ idx = np.array(list(zip(np.arange(L - 2), np.arange(L - 2) + 2))).ravel()
+ assert_array_equal(np.add.reduceat(x, idx)[::2], [1, 3, 5, 7])
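+        # Worked out (editor's note): idx == [0, 2, 1, 3, 2, 4, 3, 5], so the
+        # even slots of the reduceat result hold the overlapping pairwise sums
+        # x[0:2].sum(), x[1:3].sum(), x[2:4].sum(), x[3:5].sum() == [1, 3, 5, 7].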
+
+ def test_generic_loops(self):
+ """Test generic loops.
+
+ The loops to be tested are:
+
+ PyUFunc_ff_f_As_dd_d
+ PyUFunc_ff_f
+ PyUFunc_dd_d
+ PyUFunc_gg_g
+ PyUFunc_FF_F_As_DD_D
+ PyUFunc_DD_D
+ PyUFunc_FF_F
+ PyUFunc_GG_G
+ PyUFunc_OO_O
+ PyUFunc_OO_O_method
+ PyUFunc_f_f_As_d_d
+ PyUFunc_d_d
+ PyUFunc_f_f
+ PyUFunc_g_g
+ PyUFunc_F_F_As_D_D
+ PyUFunc_F_F
+ PyUFunc_D_D
+ PyUFunc_G_G
+ PyUFunc_O_O
+ PyUFunc_O_O_method
+ PyUFunc_On_Om
+
+ Where:
+
+ f -- float
+ d -- double
+ g -- long double
+ F -- complex float
+ D -- complex double
+ G -- complex long double
+ O -- python object
+
+    It is difficult to ensure that each of these loops is entered from the
+    Python level, as the special-cased loops are a moving target and the
+    corresponding types are architecture dependent. We probably need to
+ define C level testing ufuncs to get at them. For the time being, I've
+ just looked at the signatures registered in the build directory to find
+ relevant functions.
+
+ Fixme, currently untested:
+
+ PyUFunc_ff_f_As_dd_d
+ PyUFunc_FF_F_As_DD_D
+ PyUFunc_f_f_As_d_d
+ PyUFunc_F_F_As_D_D
+ PyUFunc_On_Om
+
+ """
+ fone = np.exp
+ ftwo = lambda x, y: x**y
+ fone_val = 1
+ ftwo_val = 1
+ # check unary PyUFunc_f_f.
+ msg = "PyUFunc_f_f"
+ x = np.zeros(10, dtype=np.single)[0::2]
+ assert_almost_equal(fone(x), fone_val, err_msg=msg)
+ # check unary PyUFunc_d_d.
+ msg = "PyUFunc_d_d"
+ x = np.zeros(10, dtype=np.double)[0::2]
+ assert_almost_equal(fone(x), fone_val, err_msg=msg)
+ # check unary PyUFunc_g_g.
+ msg = "PyUFunc_g_g"
+ x = np.zeros(10, dtype=np.longdouble)[0::2]
+ assert_almost_equal(fone(x), fone_val, err_msg=msg)
+ # check unary PyUFunc_F_F.
+ msg = "PyUFunc_F_F"
+ x = np.zeros(10, dtype=np.csingle)[0::2]
+ assert_almost_equal(fone(x), fone_val, err_msg=msg)
+ # check unary PyUFunc_D_D.
+ msg = "PyUFunc_D_D"
+ x = np.zeros(10, dtype=np.cdouble)[0::2]
+ assert_almost_equal(fone(x), fone_val, err_msg=msg)
+ # check unary PyUFunc_G_G.
+ msg = "PyUFunc_G_G"
+ x = np.zeros(10, dtype=np.clongdouble)[0::2]
+ assert_almost_equal(fone(x), fone_val, err_msg=msg)
+
+ # check binary PyUFunc_ff_f.
+ msg = "PyUFunc_ff_f"
+ x = np.ones(10, dtype=np.single)[0::2]
+ assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg)
+ # check binary PyUFunc_dd_d.
+ msg = "PyUFunc_dd_d"
+ x = np.ones(10, dtype=np.double)[0::2]
+ assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg)
+ # check binary PyUFunc_gg_g.
+ msg = "PyUFunc_gg_g"
+ x = np.ones(10, dtype=np.longdouble)[0::2]
+ assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg)
+ # check binary PyUFunc_FF_F.
+ msg = "PyUFunc_FF_F"
+ x = np.ones(10, dtype=np.csingle)[0::2]
+ assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg)
+ # check binary PyUFunc_DD_D.
+ msg = "PyUFunc_DD_D"
+ x = np.ones(10, dtype=np.cdouble)[0::2]
+ assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg)
+ # check binary PyUFunc_GG_G.
+ msg = "PyUFunc_GG_G"
+ x = np.ones(10, dtype=np.clongdouble)[0::2]
+ assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg)
+
+ # class to use in testing object method loops
+ class foo(object):
+ def conjugate(self):
+ return np.bool_(1)
+
+ def logical_xor(self, obj):
+ return np.bool_(1)
+
+ # check unary PyUFunc_O_O
+ msg = "PyUFunc_O_O"
+ x = np.ones(10, dtype=object)[0::2]
+ assert_(np.all(np.abs(x) == 1), msg)
+ # check unary PyUFunc_O_O_method
+ msg = "PyUFunc_O_O_method"
+ x = np.zeros(10, dtype=object)[0::2]
+ for i in range(len(x)):
+ x[i] = foo()
+ assert_(np.all(np.conjugate(x) == True), msg)
+
+ # check binary PyUFunc_OO_O
+ msg = "PyUFunc_OO_O"
+ x = np.ones(10, dtype=object)[0::2]
+ assert_(np.all(np.add(x, x) == 2), msg)
+ # check binary PyUFunc_OO_O_method
+ msg = "PyUFunc_OO_O_method"
+ x = np.zeros(10, dtype=object)[0::2]
+ for i in range(len(x)):
+ x[i] = foo()
+ assert_(np.all(np.logical_xor(x, x)), msg)
+
+ # check PyUFunc_On_Om
+ # fixme -- I don't know how to do this yet
+
+ def test_all_ufunc(self):
+ """Try to check presence and results of all ufuncs.
+
+ The list of ufuncs comes from generate_umath.py and is as follows:
+
+ ===== ==== ============= =============== ========================
+ done args function types notes
+ ===== ==== ============= =============== ========================
+ n 1 conjugate nums + O
+ n 1 absolute nums + O complex -> real
+ n 1 negative nums + O
+ n 1 sign nums + O -> int
+ n 1 invert bool + ints + O flts raise an error
+ n 1 degrees real + M cmplx raise an error
+ n 1 radians real + M cmplx raise an error
+ n 1 arccos flts + M
+ n 1 arccosh flts + M
+ n 1 arcsin flts + M
+ n 1 arcsinh flts + M
+ n 1 arctan flts + M
+ n 1 arctanh flts + M
+ n 1 cos flts + M
+ n 1 sin flts + M
+ n 1 tan flts + M
+ n 1 cosh flts + M
+ n 1 sinh flts + M
+ n 1 tanh flts + M
+ n 1 exp flts + M
+ n 1 expm1 flts + M
+ n 1 log flts + M
+ n 1 log10 flts + M
+ n 1 log1p flts + M
+ n 1 sqrt flts + M real x < 0 raises error
+ n 1 ceil real + M
+ n 1 trunc real + M
+ n 1 floor real + M
+ n 1 fabs real + M
+ n 1 rint flts + M
+ n 1 isnan flts -> bool
+ n 1 isinf flts -> bool
+ n 1 isfinite flts -> bool
+ n 1 signbit real -> bool
+ n 1 modf real -> (frac, int)
+ n 1 logical_not bool + nums + M -> bool
+ n 2 left_shift ints + O flts raise an error
+ n 2 right_shift ints + O flts raise an error
+ n 2 add bool + nums + O boolean + is ||
+ n 2 subtract bool + nums + O boolean - is ^
+ n 2 multiply bool + nums + O boolean * is &
+ n 2 divide nums + O
+ n 2 floor_divide nums + O
+ n 2 true_divide nums + O bBhH -> f, iIlLqQ -> d
+ n 2 fmod nums + M
+ n 2 power nums + O
+ n 2 greater bool + nums + O -> bool
+ n 2 greater_equal bool + nums + O -> bool
+ n 2 less bool + nums + O -> bool
+ n 2 less_equal bool + nums + O -> bool
+ n 2 equal bool + nums + O -> bool
+ n 2 not_equal bool + nums + O -> bool
+ n 2 logical_and bool + nums + M -> bool
+ n 2 logical_or bool + nums + M -> bool
+ n 2 logical_xor bool + nums + M -> bool
+ n 2 maximum bool + nums + O
+ n 2 minimum bool + nums + O
+ n 2 bitwise_and bool + ints + O flts raise an error
+ n 2 bitwise_or bool + ints + O flts raise an error
+ n 2 bitwise_xor bool + ints + O flts raise an error
+ n 2 arctan2 real + M
+ n 2 remainder ints + real + O
+ n 2 hypot real + M
+ ===== ==== ============= =============== ========================
+
+ Types other than those listed will be accepted, but they are cast to
+ the smallest compatible type for which the function is defined. The
+ casting rules are:
+
+ bool -> int8 -> float32
+ ints -> double
+
+ """
+ pass
+
+ # from include/numpy/ufuncobject.h
+ size_inferred = 2
+ can_ignore = 4
+ def test_signature0(self):
+ # the arguments to test_signature are: nin, nout, core_signature
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 2, 1, "(i),(i)->()")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (1, 1, 0))
+ assert_equal(ixs, (0, 0))
+ assert_equal(flags, (self.size_inferred,))
+ assert_equal(sizes, (-1,))
+
+ def test_signature1(self):
+ # empty core signature; treat as plain ufunc (with trivial core)
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 2, 1, "(),()->()")
+ assert_equal(enabled, 0)
+ assert_equal(num_dims, (0, 0, 0))
+ assert_equal(ixs, ())
+ assert_equal(flags, ())
+ assert_equal(sizes, ())
+
+ def test_signature2(self):
+ # more complicated names for variables
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 2, 1, "(i1,i2),(J_1)->(_kAB)")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (2, 1, 1))
+ assert_equal(ixs, (0, 1, 2, 3))
+ assert_equal(flags, (self.size_inferred,)*4)
+ assert_equal(sizes, (-1, -1, -1, -1))
+
+ def test_signature3(self):
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 2, 1, u"(i1, i12), (J_1)->(i12, i2)")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (2, 1, 2))
+ assert_equal(ixs, (0, 1, 2, 1, 3))
+ assert_equal(flags, (self.size_inferred,)*4)
+ assert_equal(sizes, (-1, -1, -1, -1))
+
+ def test_signature4(self):
+ # matrix_multiply signature from _umath_tests
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 2, 1, "(n,k),(k,m)->(n,m)")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (2, 2, 2))
+ assert_equal(ixs, (0, 1, 1, 2, 0, 2))
+ assert_equal(flags, (self.size_inferred,)*3)
+ assert_equal(sizes, (-1, -1, -1))
+
+ def test_signature5(self):
+ # matmul signature from _umath_tests
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 2, 1, "(n?,k),(k,m?)->(n?,m?)")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (2, 2, 2))
+ assert_equal(ixs, (0, 1, 1, 2, 0, 2))
+ assert_equal(flags, (self.size_inferred | self.can_ignore,
+ self.size_inferred,
+ self.size_inferred | self.can_ignore))
+ assert_equal(sizes, (-1, -1, -1))
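+        # For context (editor's note): the '?' in "(n?,k),(k,m?)->(n?,m?)"
+        # marks a flexible core dimension that may be missing entirely; this
+        # is how np.matmul accepts 1-d operands, and it is reported through
+        # the can_ignore flag checked above.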
+
+ def test_signature6(self):
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 1, 1, "(3)->()")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (1, 0))
+ assert_equal(ixs, (0,))
+ assert_equal(flags, (0,))
+ assert_equal(sizes, (3,))
+
+ def test_signature7(self):
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 3, 1, "(3),(03,3),(n)->(9)")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (1, 2, 1, 1))
+ assert_equal(ixs, (0, 0, 0, 1, 2))
+ assert_equal(flags, (0, self.size_inferred, 0))
+ assert_equal(sizes, (3, -1, 9))
+
+ def test_signature8(self):
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 3, 1, "(3?),(3?,3?),(n)->(9)")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (1, 2, 1, 1))
+ assert_equal(ixs, (0, 0, 0, 1, 2))
+ assert_equal(flags, (self.can_ignore, self.size_inferred, 0))
+ assert_equal(sizes, (3, -1, 9))
+
+ def test_signature_failure0(self):
+        # in the following calls, a ValueError should be raised because
+        # of an error in the core signature
+ # FIXME These should be using assert_raises
+
+ # error: extra parenthesis
+ msg = "core_sig: extra parenthesis"
+ try:
+ ret = umt.test_signature(2, 1, "((i)),(i)->()")
+ assert_equal(ret, None, err_msg=msg)
+ except ValueError:
+ pass
+
+ def test_signature_failure1(self):
+ # error: parenthesis matching
+ msg = "core_sig: parenthesis matching"
+ try:
+ ret = umt.test_signature(2, 1, "(i),)i(->()")
+ assert_equal(ret, None, err_msg=msg)
+ except ValueError:
+ pass
+
+ def test_signature_failure2(self):
+        # error: incomplete signature; letters outside of parentheses are ignored
+ msg = "core_sig: incomplete signature"
+ try:
+ ret = umt.test_signature(2, 1, "(i),->()")
+ assert_equal(ret, None, err_msg=msg)
+ except ValueError:
+ pass
+
+ def test_signature_failure3(self):
+ # error: incomplete signature. 2 output arguments are specified
+ msg = "core_sig: incomplete signature"
+ try:
+ ret = umt.test_signature(2, 2, "(i),(i)->()")
+ assert_equal(ret, None, err_msg=msg)
+ except ValueError:
+ pass
+
+ def test_get_signature(self):
+ assert_equal(umt.inner1d.signature, "(i),(i)->()")
+
+ def test_forced_sig(self):
+ a = 0.5*np.arange(3, dtype='f8')
+ assert_equal(np.add(a, 0.5), [0.5, 1, 1.5])
+ assert_equal(np.add(a, 0.5, sig='i', casting='unsafe'), [0, 0, 1])
+ assert_equal(np.add(a, 0.5, sig='ii->i', casting='unsafe'), [0, 0, 1])
+ assert_equal(np.add(a, 0.5, sig=('i4',), casting='unsafe'), [0, 0, 1])
+ assert_equal(np.add(a, 0.5, sig=('i4', 'i4', 'i4'),
+ casting='unsafe'), [0, 0, 1])
+
+ b = np.zeros((3,), dtype='f8')
+ np.add(a, 0.5, out=b)
+ assert_equal(b, [0.5, 1, 1.5])
+ b[:] = 0
+ np.add(a, 0.5, sig='i', out=b, casting='unsafe')
+ assert_equal(b, [0, 0, 1])
+ b[:] = 0
+ np.add(a, 0.5, sig='ii->i', out=b, casting='unsafe')
+ assert_equal(b, [0, 0, 1])
+ b[:] = 0
+ np.add(a, 0.5, sig=('i4',), out=b, casting='unsafe')
+ assert_equal(b, [0, 0, 1])
+ b[:] = 0
+ np.add(a, 0.5, sig=('i4', 'i4', 'i4'), out=b, casting='unsafe')
+ assert_equal(b, [0, 0, 1])
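+        # Usage note (editor's sketch): forcing sig='ii->i' selects the
+        # int32 inner loop, so with casting='unsafe' the float inputs are
+        # truncated to integers first; that is why the 0.5 + 0.5 element
+        # yields 0 above rather than 1.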
+
+ def test_true_divide(self):
+ a = np.array(10)
+ b = np.array(20)
+ tgt = np.array(0.5)
+
+ for tc in 'bhilqBHILQefdgFDG':
+ dt = np.dtype(tc)
+ aa = a.astype(dt)
+ bb = b.astype(dt)
+
+ # Check result value and dtype.
+ for x, y in itertools.product([aa, -aa], [bb, -bb]):
+
+ # Check with no output type specified
+ if tc in 'FDG':
+ tgt = complex(x)/complex(y)
+ else:
+ tgt = float(x)/float(y)
+
+ res = np.true_divide(x, y)
+ rtol = max(np.finfo(res).resolution, 1e-15)
+ assert_allclose(res, tgt, rtol=rtol)
+
+ if tc in 'bhilqBHILQ':
+ assert_(res.dtype.name == 'float64')
+ else:
+ assert_(res.dtype.name == dt.name )
+
+ # Check with output type specified. This also checks for the
+ # incorrect casts in issue gh-3484 because the unary '-' does
+            # not change types, even for unsigned types. Hence casts in the
+ # ufunc from signed to unsigned and vice versa will lead to
+ # errors in the values.
+ for tcout in 'bhilqBHILQ':
+ dtout = np.dtype(tcout)
+ assert_raises(TypeError, np.true_divide, x, y, dtype=dtout)
+
+ for tcout in 'efdg':
+ dtout = np.dtype(tcout)
+ if tc in 'FDG':
+ # Casting complex to float is not allowed
+ assert_raises(TypeError, np.true_divide, x, y, dtype=dtout)
+ else:
+ tgt = float(x)/float(y)
+ rtol = max(np.finfo(dtout).resolution, 1e-15)
+ atol = max(np.finfo(dtout).tiny, 3e-308)
+ # Some test values produce an invalid (NaN) result for float16.
+ with np.errstate(invalid='ignore'):
+ res = np.true_divide(x, y, dtype=dtout)
+ if not np.isfinite(res) and tcout == 'e':
+ continue
+ assert_allclose(res, tgt, rtol=rtol, atol=atol)
+ assert_(res.dtype.name == dtout.name)
+
+ for tcout in 'FDG':
+ dtout = np.dtype(tcout)
+ tgt = complex(x)/complex(y)
+ rtol = max(np.finfo(dtout).resolution, 1e-15)
+ atol = max(np.finfo(dtout).tiny, 3e-308)
+ res = np.true_divide(x, y, dtype=dtout)
+ if not np.isfinite(res):
+ continue
+ assert_allclose(res, tgt, rtol=rtol, atol=atol)
+ assert_(res.dtype.name == dtout.name)
+
+ # Check booleans
+ a = np.ones((), dtype=np.bool_)
+ res = np.true_divide(a, a)
+ assert_(res == 1.0)
+ assert_(res.dtype.name == 'float64')
+ res = np.true_divide(~a, a)
+ assert_(res == 0.0)
+ assert_(res.dtype.name == 'float64')
+
+ def test_sum_stability(self):
+ a = np.ones(500, dtype=np.float32)
+ assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 4)
+
+ a = np.ones(500, dtype=np.float64)
+ assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 13)
+
+ def test_sum(self):
+ for dt in (int, np.float16, np.float32, np.float64, np.longdouble):
+ for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127,
+ 128, 1024, 1235):
+ tgt = dt(v * (v + 1) / 2)
+ d = np.arange(1, v + 1, dtype=dt)
+
+ # warning if sum overflows, which it does in float16
+ overflow = not np.isfinite(tgt)
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter("always")
+ assert_almost_equal(np.sum(d), tgt)
+ assert_equal(len(w), 1 * overflow)
+
+ assert_almost_equal(np.sum(d[::-1]), tgt)
+ assert_equal(len(w), 2 * overflow)
+
+ d = np.ones(500, dtype=dt)
+ assert_almost_equal(np.sum(d[::2]), 250.)
+ assert_almost_equal(np.sum(d[1::2]), 250.)
+ assert_almost_equal(np.sum(d[::3]), 167.)
+ assert_almost_equal(np.sum(d[1::3]), 167.)
+ assert_almost_equal(np.sum(d[::-2]), 250.)
+ assert_almost_equal(np.sum(d[-1::-2]), 250.)
+ assert_almost_equal(np.sum(d[::-3]), 167.)
+ assert_almost_equal(np.sum(d[-1::-3]), 167.)
+ # sum with first reduction entry != 0
+ d = np.ones((1,), dtype=dt)
+ d += d
+ assert_almost_equal(d, 2.)
+
+ def test_sum_complex(self):
+ for dt in (np.complex64, np.complex128, np.clongdouble):
+ for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127,
+ 128, 1024, 1235):
+ tgt = dt(v * (v + 1) / 2) - dt((v * (v + 1) / 2) * 1j)
+ d = np.empty(v, dtype=dt)
+ d.real = np.arange(1, v + 1)
+ d.imag = -np.arange(1, v + 1)
+ assert_almost_equal(np.sum(d), tgt)
+ assert_almost_equal(np.sum(d[::-1]), tgt)
+
+ d = np.ones(500, dtype=dt) + 1j
+ assert_almost_equal(np.sum(d[::2]), 250. + 250j)
+ assert_almost_equal(np.sum(d[1::2]), 250. + 250j)
+ assert_almost_equal(np.sum(d[::3]), 167. + 167j)
+ assert_almost_equal(np.sum(d[1::3]), 167. + 167j)
+ assert_almost_equal(np.sum(d[::-2]), 250. + 250j)
+ assert_almost_equal(np.sum(d[-1::-2]), 250. + 250j)
+ assert_almost_equal(np.sum(d[::-3]), 167. + 167j)
+ assert_almost_equal(np.sum(d[-1::-3]), 167. + 167j)
+ # sum with first reduction entry != 0
+ d = np.ones((1,), dtype=dt) + 1j
+ d += d
+ assert_almost_equal(d, 2. + 2j)
+
+ def test_sum_initial(self):
+ # Integer, single axis
+ assert_equal(np.sum([3], initial=2), 5)
+
+ # Floating point
+ assert_almost_equal(np.sum([0.2], initial=0.1), 0.3)
+
+ # Multiple non-adjacent axes
+ assert_equal(np.sum(np.ones((2, 3, 5), dtype=np.int64), axis=(0, 2), initial=2),
+ [12, 12, 12])
+
+ def test_inner1d(self):
+ a = np.arange(6).reshape((2, 3))
+ assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1))
+ a = np.arange(6)
+ assert_array_equal(umt.inner1d(a, a), np.sum(a*a))
+
+ def test_broadcast(self):
+ msg = "broadcast"
+ a = np.arange(4).reshape((2, 1, 2))
+ b = np.arange(4).reshape((1, 2, 2))
+ assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
+ msg = "extend & broadcast loop dimensions"
+ b = np.arange(4).reshape((2, 2))
+ assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
+ # Broadcast in core dimensions should fail
+ a = np.arange(8).reshape((4, 2))
+ b = np.arange(4).reshape((4, 1))
+ assert_raises(ValueError, umt.inner1d, a, b)
+ # Extend core dimensions should fail
+ a = np.arange(8).reshape((4, 2))
+ b = np.array(7)
+ assert_raises(ValueError, umt.inner1d, a, b)
+ # Broadcast should fail
+ a = np.arange(2).reshape((2, 1, 1))
+ b = np.arange(3).reshape((3, 1, 1))
+ assert_raises(ValueError, umt.inner1d, a, b)
+
+ def test_type_cast(self):
+ msg = "type cast"
+ a = np.arange(6, dtype='short').reshape((2, 3))
+ assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1),
+ err_msg=msg)
+ msg = "type cast on one argument"
+ a = np.arange(6).reshape((2, 3))
+ b = a + 0.1
+ assert_array_almost_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1),
+ err_msg=msg)
+
+ def test_endian(self):
+ msg = "big endian"
+ a = np.arange(6, dtype='>i4').reshape((2, 3))
+ assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1),
+ err_msg=msg)
+ msg = "little endian"
+ a = np.arange(6, dtype='<i4').reshape((2, 3))
+ assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1),
+ err_msg=msg)
+
+ def test_axes_argument(self):
+ # inner1d signature: '(i),(i)->()'
+ inner1d = umt.inner1d
+ a = np.arange(27.).reshape((3, 3, 3))
+ b = np.arange(10., 19.).reshape((3, 1, 3))
+ # basic tests on inputs (outputs tested below with matrix_multiply).
+ c = inner1d(a, b)
+ assert_array_equal(c, (a * b).sum(-1))
+ # default
+ c = inner1d(a, b, axes=[(-1,), (-1,), ()])
+ assert_array_equal(c, (a * b).sum(-1))
+ # integers ok for single axis.
+ c = inner1d(a, b, axes=[-1, -1, ()])
+ assert_array_equal(c, (a * b).sum(-1))
+ # mix fine
+ c = inner1d(a, b, axes=[(-1,), -1, ()])
+ assert_array_equal(c, (a * b).sum(-1))
+ # can omit last axis.
+ c = inner1d(a, b, axes=[-1, -1])
+ assert_array_equal(c, (a * b).sum(-1))
+ # can pass in other types of integer (with __index__ protocol)
+ c = inner1d(a, b, axes=[np.int8(-1), np.array(-1, dtype=np.int32)])
+ assert_array_equal(c, (a * b).sum(-1))
+ # swap some axes
+ c = inner1d(a, b, axes=[0, 0])
+ assert_array_equal(c, (a * b).sum(0))
+ c = inner1d(a, b, axes=[0, 2])
+ assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1))
+ # Check errors for improperly constructed axes arguments.
+ # should have list.
+ assert_raises(TypeError, inner1d, a, b, axes=-1)
+ # needs enough elements
+ assert_raises(ValueError, inner1d, a, b, axes=[-1])
+ # should pass in indices.
+ assert_raises(TypeError, inner1d, a, b, axes=[-1.0, -1.0])
+ assert_raises(TypeError, inner1d, a, b, axes=[(-1.0,), -1])
+ assert_raises(TypeError, inner1d, a, b, axes=[None, 1])
+ # cannot pass an index unless there is only one dimension
+ # (output is wrong in this case)
+ assert_raises(TypeError, inner1d, a, b, axes=[-1, -1, -1])
+ # or pass in generally the wrong number of axes
+ assert_raises(ValueError, inner1d, a, b, axes=[-1, -1, (-1,)])
+ assert_raises(ValueError, inner1d, a, b, axes=[-1, (-2, -1), ()])
+ # axes need to have same length.
+ assert_raises(ValueError, inner1d, a, b, axes=[0, 1])
+
+ # matrix_multiply signature: '(m,n),(n,p)->(m,p)'
+ mm = umt.matrix_multiply
+ a = np.arange(12).reshape((2, 3, 2))
+ b = np.arange(8).reshape((2, 2, 2, 1)) + 1
+ # Sanity check.
+ c = mm(a, b)
+ assert_array_equal(c, np.matmul(a, b))
+ # Default axes.
+ c = mm(a, b, axes=[(-2, -1), (-2, -1), (-2, -1)])
+ assert_array_equal(c, np.matmul(a, b))
+ # Default with explicit axes.
+ c = mm(a, b, axes=[(1, 2), (2, 3), (2, 3)])
+ assert_array_equal(c, np.matmul(a, b))
+ # swap some axes.
+ c = mm(a, b, axes=[(0, -1), (1, 2), (-2, -1)])
+ assert_array_equal(c, np.matmul(a.transpose(1, 0, 2),
+ b.transpose(0, 3, 1, 2)))
+ # Default with output array.
+ c = np.empty((2, 2, 3, 1))
+ d = mm(a, b, out=c, axes=[(1, 2), (2, 3), (2, 3)])
+ assert_(c is d)
+ assert_array_equal(c, np.matmul(a, b))
+ # Transposed output array
+ c = np.empty((1, 2, 2, 3))
+ d = mm(a, b, out=c, axes=[(-2, -1), (-2, -1), (3, 0)])
+ assert_(c is d)
+ assert_array_equal(c, np.matmul(a, b).transpose(3, 0, 1, 2))
+ # Check errors for improperly constructed axes arguments.
+ # wrong argument
+ assert_raises(TypeError, mm, a, b, axis=1)
+ # axes should be list
+ assert_raises(TypeError, mm, a, b, axes=1)
+ assert_raises(TypeError, mm, a, b, axes=((-2, -1), (-2, -1), (-2, -1)))
+ # list needs to have right length
+ assert_raises(ValueError, mm, a, b, axes=[])
+ assert_raises(ValueError, mm, a, b, axes=[(-2, -1)])
+ # list should contain tuples for multiple axes
+ assert_raises(TypeError, mm, a, b, axes=[-1, -1, -1])
+ assert_raises(TypeError, mm, a, b, axes=[(-2, -1), (-2, -1), -1])
+ assert_raises(TypeError,
+ mm, a, b, axes=[[-2, -1], [-2, -1], [-2, -1]])
+ assert_raises(TypeError,
+ mm, a, b, axes=[(-2, -1), (-2, -1), [-2, -1]])
+ assert_raises(TypeError, mm, a, b, axes=[(-2, -1), (-2, -1), None])
+ # tuples should not have duplicated values
+ assert_raises(ValueError, mm, a, b, axes=[(-2, -1), (-2, -1), (-2, -2)])
+ # arrays should have enough axes.
+ z = np.zeros((2, 2))
+ assert_raises(ValueError, mm, z, z[0])
+ assert_raises(ValueError, mm, z, z, out=z[:, 0])
+ assert_raises(ValueError, mm, z[1], z, axes=[0, 1])
+ assert_raises(ValueError, mm, z, z, out=z[0], axes=[0, 1])
+ # Regular ufuncs should not accept axes.
+ assert_raises(TypeError, np.add, 1., 1., axes=[0])
+ # should be able to deal with bad unrelated kwargs.
+ assert_raises(TypeError, mm, z, z, axes=[0, 1], parrot=True)
+
+ def test_axis_argument(self):
+ # inner1d signature: '(i),(i)->()'
+ inner1d = umt.inner1d
+ a = np.arange(27.).reshape((3, 3, 3))
+ b = np.arange(10., 19.).reshape((3, 1, 3))
+ c = inner1d(a, b)
+ assert_array_equal(c, (a * b).sum(-1))
+ c = inner1d(a, b, axis=-1)
+ assert_array_equal(c, (a * b).sum(-1))
+ out = np.zeros_like(c)
+ d = inner1d(a, b, axis=-1, out=out)
+ assert_(d is out)
+ assert_array_equal(d, c)
+ c = inner1d(a, b, axis=0)
+ assert_array_equal(c, (a * b).sum(0))
+ # Sanity checks on innerwt and cumsum.
+ a = np.arange(6).reshape((2, 3))
+ b = np.arange(10, 16).reshape((2, 3))
+ w = np.arange(20, 26).reshape((2, 3))
+ assert_array_equal(umt.innerwt(a, b, w, axis=0),
+ np.sum(a * b * w, axis=0))
+ assert_array_equal(umt.cumsum(a, axis=0), np.cumsum(a, axis=0))
+ assert_array_equal(umt.cumsum(a, axis=-1), np.cumsum(a, axis=-1))
+ out = np.empty_like(a)
+ b = umt.cumsum(a, out=out, axis=0)
+ assert_(out is b)
+ assert_array_equal(b, np.cumsum(a, axis=0))
+ b = umt.cumsum(a, out=out, axis=1)
+ assert_(out is b)
+ assert_array_equal(b, np.cumsum(a, axis=-1))
+ # Check errors.
+ # Cannot pass in both axis and axes.
+ assert_raises(TypeError, inner1d, a, b, axis=0, axes=[0, 0])
+ # Not an integer.
+ assert_raises(TypeError, inner1d, a, b, axis=[0])
+ # more than 1 core dimensions.
+ mm = umt.matrix_multiply
+ assert_raises(TypeError, mm, a, b, axis=1)
+ # Output wrong size in axis.
+ out = np.empty((1, 2, 3), dtype=a.dtype)
+ assert_raises(ValueError, umt.cumsum, a, out=out, axis=0)
+ # Regular ufuncs should not accept axis.
+ assert_raises(TypeError, np.add, 1., 1., axis=0)
+
+ def test_keepdims_argument(self):
+ # inner1d signature: '(i),(i)->()'
+ inner1d = umt.inner1d
+ a = np.arange(27.).reshape((3, 3, 3))
+ b = np.arange(10., 19.).reshape((3, 1, 3))
+ c = inner1d(a, b)
+ assert_array_equal(c, (a * b).sum(-1))
+ c = inner1d(a, b, keepdims=False)
+ assert_array_equal(c, (a * b).sum(-1))
+ c = inner1d(a, b, keepdims=True)
+ assert_array_equal(c, (a * b).sum(-1, keepdims=True))
+ out = np.zeros_like(c)
+ d = inner1d(a, b, keepdims=True, out=out)
+ assert_(d is out)
+ assert_array_equal(d, c)
+ # Now combined with axis and axes.
+ c = inner1d(a, b, axis=-1, keepdims=False)
+ assert_array_equal(c, (a * b).sum(-1, keepdims=False))
+ c = inner1d(a, b, axis=-1, keepdims=True)
+ assert_array_equal(c, (a * b).sum(-1, keepdims=True))
+ c = inner1d(a, b, axis=0, keepdims=False)
+ assert_array_equal(c, (a * b).sum(0, keepdims=False))
+ c = inner1d(a, b, axis=0, keepdims=True)
+ assert_array_equal(c, (a * b).sum(0, keepdims=True))
+ c = inner1d(a, b, axes=[(-1,), (-1,), ()], keepdims=False)
+ assert_array_equal(c, (a * b).sum(-1))
+ c = inner1d(a, b, axes=[(-1,), (-1,), (-1,)], keepdims=True)
+ assert_array_equal(c, (a * b).sum(-1, keepdims=True))
+ c = inner1d(a, b, axes=[0, 0], keepdims=False)
+ assert_array_equal(c, (a * b).sum(0))
+ c = inner1d(a, b, axes=[0, 0, 0], keepdims=True)
+ assert_array_equal(c, (a * b).sum(0, keepdims=True))
+ c = inner1d(a, b, axes=[0, 2], keepdims=False)
+ assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1))
+ c = inner1d(a, b, axes=[0, 2], keepdims=True)
+ assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1,
+ keepdims=True))
+ c = inner1d(a, b, axes=[0, 2, 2], keepdims=True)
+ assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1,
+ keepdims=True))
+ c = inner1d(a, b, axes=[0, 2, 0], keepdims=True)
+ assert_array_equal(c, (a * b.transpose(2, 0, 1)).sum(0, keepdims=True))
+ # Hardly useful, but should work.
+ c = inner1d(a, b, axes=[0, 2, 1], keepdims=True)
+ assert_array_equal(c, (a.transpose(1, 0, 2) * b.transpose(0, 2, 1))
+ .sum(1, keepdims=True))
+ # Check with two core dimensions.
+ a = np.eye(3) * np.arange(4.)[:, np.newaxis, np.newaxis]
+ expected = uml.det(a)
+ c = uml.det(a, keepdims=False)
+ assert_array_equal(c, expected)
+ c = uml.det(a, keepdims=True)
+ assert_array_equal(c, expected[:, np.newaxis, np.newaxis])
+ a = np.eye(3) * np.arange(4.)[:, np.newaxis, np.newaxis]
+ expected_s, expected_l = uml.slogdet(a)
+ cs, cl = uml.slogdet(a, keepdims=False)
+ assert_array_equal(cs, expected_s)
+ assert_array_equal(cl, expected_l)
+ cs, cl = uml.slogdet(a, keepdims=True)
+ assert_array_equal(cs, expected_s[:, np.newaxis, np.newaxis])
+ assert_array_equal(cl, expected_l[:, np.newaxis, np.newaxis])
+ # Sanity check on innerwt.
+ a = np.arange(6).reshape((2, 3))
+ b = np.arange(10, 16).reshape((2, 3))
+ w = np.arange(20, 26).reshape((2, 3))
+ assert_array_equal(umt.innerwt(a, b, w, keepdims=True),
+ np.sum(a * b * w, axis=-1, keepdims=True))
+ assert_array_equal(umt.innerwt(a, b, w, axis=0, keepdims=True),
+ np.sum(a * b * w, axis=0, keepdims=True))
+ # Check errors.
+ # Not a boolean
+ assert_raises(TypeError, inner1d, a, b, keepdims='true')
+ # More than 1 core dimension, and core output dimensions.
+ mm = umt.matrix_multiply
+ assert_raises(TypeError, mm, a, b, keepdims=True)
+ assert_raises(TypeError, mm, a, b, keepdims=False)
+ # Regular ufuncs should not accept keepdims.
+ assert_raises(TypeError, np.add, 1., 1., keepdims=False)
+
+ def test_innerwt(self):
+ a = np.arange(6).reshape((2, 3))
+ b = np.arange(10, 16).reshape((2, 3))
+ w = np.arange(20, 26).reshape((2, 3))
+ assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1))
+ a = np.arange(100, 124).reshape((2, 3, 4))
+ b = np.arange(200, 224).reshape((2, 3, 4))
+ w = np.arange(300, 324).reshape((2, 3, 4))
+ assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1))
+
+ def test_innerwt_empty(self):
+ """Test generalized ufunc with zero-sized operands"""
+ a = np.array([], dtype='f8')
+ b = np.array([], dtype='f8')
+ w = np.array([], dtype='f8')
+ assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1))
+
+ def test_cross1d(self):
+ """Test with fixed-sized signature."""
+ a = np.eye(3)
+ assert_array_equal(umt.cross1d(a, a), np.zeros((3, 3)))
+ out = np.zeros((3, 3))
+ result = umt.cross1d(a[0], a, out)
+ assert_(result is out)
+ assert_array_equal(result, np.vstack((np.zeros(3), a[2], -a[1])))
+ assert_raises(ValueError, umt.cross1d, np.eye(4), np.eye(4))
+ assert_raises(ValueError, umt.cross1d, a, np.arange(4.))
+ assert_raises(ValueError, umt.cross1d, a, np.arange(3.), np.zeros((3, 4)))
+
+ def test_can_ignore_signature(self):
+ # Comparing the effects of ? in signature:
+ # matrix_multiply: (m,n),(n,p)->(m,p) # all must be there.
+ # matmul: (m?,n),(n,p?)->(m?,p?) # allow missing m, p.
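+ # (sketch: for mat.shape == (2, 3, 2) and vec.shape == (2,), matmul
+ # treats p as missing and returns shape (2, 3), while matrix_multiply
+ # raises because vec lacks the (n, p) core dimensions)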
+ mat = np.arange(12).reshape((2, 3, 2))
+ single_vec = np.arange(2)
+ col_vec = single_vec[:, np.newaxis]
+ col_vec_array = np.arange(8).reshape((2, 2, 2, 1)) + 1
+ # matrix @ single column vector with proper dimension
+ mm_col_vec = umt.matrix_multiply(mat, col_vec)
+ # matmul does the same thing
+ matmul_col_vec = umt.matmul(mat, col_vec)
+ assert_array_equal(matmul_col_vec, mm_col_vec)
+ # matrix @ vector without dimension making it a column vector.
+ # matrix multiply fails -> missing core dim.
+ assert_raises(ValueError, umt.matrix_multiply, mat, single_vec)
+ # matmul mimicker passes, and returns a vector.
+ matmul_col = umt.matmul(mat, single_vec)
+ assert_array_equal(matmul_col, mm_col_vec.squeeze())
+ # Now with a column array: same as for column vector,
+ # broadcasting sensibly.
+ mm_col_vec = umt.matrix_multiply(mat, col_vec_array)
+ matmul_col_vec = umt.matmul(mat, col_vec_array)
+ assert_array_equal(matmul_col_vec, mm_col_vec)
+ # As above, but for row vector
+ single_vec = np.arange(3)
+ row_vec = single_vec[np.newaxis, :]
+ row_vec_array = np.arange(24).reshape((4, 2, 1, 1, 3)) + 1
+ # row vector @ matrix
+ mm_row_vec = umt.matrix_multiply(row_vec, mat)
+ matmul_row_vec = umt.matmul(row_vec, mat)
+ assert_array_equal(matmul_row_vec, mm_row_vec)
+ # single row vector @ matrix
+ assert_raises(ValueError, umt.matrix_multiply, single_vec, mat)
+ matmul_row = umt.matmul(single_vec, mat)
+ assert_array_equal(matmul_row, mm_row_vec.squeeze())
+ # row vector array @ matrix
+ mm_row_vec = umt.matrix_multiply(row_vec_array, mat)
+ matmul_row_vec = umt.matmul(row_vec_array, mat)
+ assert_array_equal(matmul_row_vec, mm_row_vec)
+ # Now for vector combinations
+ # row vector @ column vector
+ col_vec = row_vec.T
+ col_vec_array = row_vec_array.swapaxes(-2, -1)
+ mm_row_col_vec = umt.matrix_multiply(row_vec, col_vec)
+ matmul_row_col_vec = umt.matmul(row_vec, col_vec)
+ assert_array_equal(matmul_row_col_vec, mm_row_col_vec)
+ # single row vector @ single col vector
+ assert_raises(ValueError, umt.matrix_multiply, single_vec, single_vec)
+ matmul_row_col = umt.matmul(single_vec, single_vec)
+ assert_array_equal(matmul_row_col, mm_row_col_vec.squeeze())
+ # row vector array @ matrix
+ mm_row_col_array = umt.matrix_multiply(row_vec_array, col_vec_array)
+ matmul_row_col_array = umt.matmul(row_vec_array, col_vec_array)
+ assert_array_equal(matmul_row_col_array, mm_row_col_array)
+ # Finally, check that things are *not* squeezed if one gives an
+ # output.
+ out = np.zeros_like(mm_row_col_array)
+ out = umt.matrix_multiply(row_vec_array, col_vec_array, out=out)
+ assert_array_equal(out, mm_row_col_array)
+ out[:] = 0
+ out = umt.matmul(row_vec_array, col_vec_array, out=out)
+ assert_array_equal(out, mm_row_col_array)
+ # And check one cannot put missing dimensions back.
+ out = np.zeros_like(mm_row_col_vec)
+ assert_raises(ValueError, umt.matrix_multiply, single_vec, single_vec,
+ out)
+ # But fine for matmul, since it is just a broadcast.
+ out = umt.matmul(single_vec, single_vec, out)
+ assert_array_equal(out, mm_row_col_vec.squeeze())
+
+ def test_matrix_multiply(self):
+ self.compare_matrix_multiply_results(np.long)
+ self.compare_matrix_multiply_results(np.double)
+
+ def test_matrix_multiply_umath_empty(self):
+ res = umt.matrix_multiply(np.ones((0, 10)), np.ones((10, 0)))
+ assert_array_equal(res, np.zeros((0, 0)))
+ res = umt.matrix_multiply(np.ones((10, 0)), np.ones((0, 10)))
+ assert_array_equal(res, np.zeros((10, 10)))
+
+ def compare_matrix_multiply_results(self, tp):
+ d1 = np.array(np.random.rand(2, 3, 4), dtype=tp)
+ d2 = np.array(np.random.rand(2, 3, 4), dtype=tp)
+ msg = "matrix multiply on type %s" % d1.dtype.name
+
+ def permute_n(n):
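+ # Recursively build all n! permutations of range(n): append n-1 to
+ # each (n-1)-permutation, then swap it into every position.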
+ if n == 1:
+ return ([0],)
+ ret = ()
+ base = permute_n(n-1)
+ for perm in base:
+ for i in range(n):
+ new = perm + [n-1]
+ new[n-1] = new[i]
+ new[i] = n-1
+ ret += (new,)
+ return ret
+
+ def slice_n(n):
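+ # All 2**n combinations of full-axis and first-element slices over
+ # n axes.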
+ if n == 0:
+ return ((),)
+ ret = ()
+ base = slice_n(n-1)
+ for sl in base:
+ ret += (sl+(slice(None),),)
+ ret += (sl+(slice(0, 1),),)
+ return ret
+
+ def broadcastable(s1, s2):
+ return s1 == s2 or s1 == 1 or s2 == 1
+
+ permute_3 = permute_n(3)
+ slice_3 = slice_n(3) + ((slice(None, None, -1),)*3,)
+
+ ref = True
+ for p1 in permute_3:
+ for p2 in permute_3:
+ for s1 in slice_3:
+ for s2 in slice_3:
+ a1 = d1.transpose(p1)[s1]
+ a2 = d2.transpose(p2)[s2]
+ ref = ref and a1.base is not None
+ ref = ref and a2.base is not None
+ if (a1.shape[-1] == a2.shape[-2] and
+ broadcastable(a1.shape[0], a2.shape[0])):
+ assert_array_almost_equal(
+ umt.matrix_multiply(a1, a2),
+ np.sum(a2[..., np.newaxis].swapaxes(-3, -1) *
+ a1[..., np.newaxis,:], axis=-1),
+ err_msg=msg + ' %s %s' % (str(a1.shape),
+ str(a2.shape)))
+
+ assert_equal(ref, True, err_msg="reference check")
+
+ def test_euclidean_pdist(self):
+ a = np.arange(12, dtype=float).reshape(4, 3)
+ out = np.empty((a.shape[0] * (a.shape[0] - 1) // 2,), dtype=a.dtype)
+ umt.euclidean_pdist(a, out)
+ b = np.sqrt(np.sum((a[:, None] - a)**2, axis=-1))
+ b = b[~np.tri(a.shape[0], dtype=bool)]
+ assert_almost_equal(out, b)
+ # An output array is required to determine p with signature (n,d)->(p)
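+ # (here n = 4 points give p = n*(n-1)/2 = 6 distances; p cannot be
+ # inferred from the input shape alone)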
+ assert_raises(ValueError, umt.euclidean_pdist, a)
+
+ def test_cumsum(self):
+ a = np.arange(10)
+ result = umt.cumsum(a)
+ assert_array_equal(result, a.cumsum())
+
+ def test_object_logical(self):
+ a = np.array([3, None, True, False, "test", ""], dtype=object)
+ assert_equal(np.logical_or(a, None),
+ np.array([x or None for x in a], dtype=object))
+ assert_equal(np.logical_or(a, True),
+ np.array([x or True for x in a], dtype=object))
+ assert_equal(np.logical_or(a, 12),
+ np.array([x or 12 for x in a], dtype=object))
+ assert_equal(np.logical_or(a, "blah"),
+ np.array([x or "blah" for x in a], dtype=object))
+
+ assert_equal(np.logical_and(a, None),
+ np.array([x and None for x in a], dtype=object))
+ assert_equal(np.logical_and(a, True),
+ np.array([x and True for x in a], dtype=object))
+ assert_equal(np.logical_and(a, 12),
+ np.array([x and 12 for x in a], dtype=object))
+ assert_equal(np.logical_and(a, "blah"),
+ np.array([x and "blah" for x in a], dtype=object))
+
+ assert_equal(np.logical_not(a),
+ np.array([not x for x in a], dtype=object))
+
+ assert_equal(np.logical_or.reduce(a), 3)
+ assert_equal(np.logical_and.reduce(a), None)
+
+ def test_object_comparison(self):
+ class HasComparisons(object):
+ def __eq__(self, other):
+ return '=='
+
+ arr0d = np.array(HasComparisons())
+ assert_equal(arr0d == arr0d, True)
+ assert_equal(np.equal(arr0d, arr0d), True) # normal behavior is a cast
+ assert_equal(np.equal(arr0d, arr0d, dtype=object), '==')
+
+ arr1d = np.array([HasComparisons()])
+ assert_equal(arr1d == arr1d, np.array([True]))
+ assert_equal(np.equal(arr1d, arr1d), np.array([True])) # normal behavior is a cast
+ assert_equal(np.equal(arr1d, arr1d, dtype=object), np.array(['==']))
+
+ def test_object_array_reduction(self):
+ # Reductions on object arrays
+ a = np.array(['a', 'b', 'c'], dtype=object)
+ assert_equal(np.sum(a), 'abc')
+ assert_equal(np.max(a), 'c')
+ assert_equal(np.min(a), 'a')
+ a = np.array([True, False, True], dtype=object)
+ assert_equal(np.sum(a), 2)
+ assert_equal(np.prod(a), 0)
+ assert_equal(np.any(a), True)
+ assert_equal(np.all(a), False)
+ assert_equal(np.max(a), True)
+ assert_equal(np.min(a), False)
+ assert_equal(np.array([[1]], dtype=object).sum(), 1)
+ assert_equal(np.array([[[1, 2]]], dtype=object).sum((0, 1)), [1, 2])
+ assert_equal(np.array([1], dtype=object).sum(initial=1), 2)
+
+ def test_object_array_accumulate_inplace(self):
+ # Checks that in-place accumulates work, see also gh-7402
+ arr = np.ones(4, dtype=object)
+ arr[:] = [[1] for i in range(4)]
+ # Run the accumulate twice (the issue also reproduced with tuples):
+ np.add.accumulate(arr, out=arr)
+ np.add.accumulate(arr, out=arr)
+ assert_array_equal(arr, np.array([[1]*i for i in [1, 3, 6, 10]]))
+
+ # And the same if the axis argument is used
+ arr = np.ones((2, 4), dtype=object)
+ arr[0, :] = [[2] for i in range(4)]
+ np.add.accumulate(arr, out=arr, axis=-1)
+ np.add.accumulate(arr, out=arr, axis=-1)
+ assert_array_equal(arr[0, :], np.array([[2]*i for i in [1, 3, 6, 10]]))
+
+ def test_object_array_reduceat_inplace(self):
+ # Checks that in-place reduceats work, see also gh-7465
+ arr = np.empty(4, dtype=object)
+ arr[:] = [[1] for i in range(4)]
+ out = np.empty(4, dtype=object)
+ out[:] = [[1] for i in range(4)]
+ np.add.reduceat(arr, np.arange(4), out=arr)
+ np.add.reduceat(arr, np.arange(4), out=arr)
+ assert_array_equal(arr, out)
+
+ # And the same if the axis argument is used
+ arr = np.ones((2, 4), dtype=object)
+ arr[0, :] = [[2] for i in range(4)]
+ out = np.ones((2, 4), dtype=object)
+ out[0, :] = [[2] for i in range(4)]
+ np.add.reduceat(arr, np.arange(4), out=arr, axis=-1)
+ np.add.reduceat(arr, np.arange(4), out=arr, axis=-1)
+ assert_array_equal(arr, out)
+
+ def test_zerosize_reduction(self):
+ # Test with default dtype and object dtype
+ for a in [[], np.array([], dtype=object)]:
+ assert_equal(np.sum(a), 0)
+ assert_equal(np.prod(a), 1)
+ assert_equal(np.any(a), False)
+ assert_equal(np.all(a), True)
+ assert_raises(ValueError, np.max, a)
+ assert_raises(ValueError, np.min, a)
+
+ def test_axis_out_of_bounds(self):
+ a = np.array([False, False])
+ assert_raises(np.AxisError, a.all, axis=1)
+ a = np.array([False, False])
+ assert_raises(np.AxisError, a.all, axis=-2)
+
+ a = np.array([False, False])
+ assert_raises(np.AxisError, a.any, axis=1)
+ a = np.array([False, False])
+ assert_raises(np.AxisError, a.any, axis=-2)
+
+ def test_scalar_reduction(self):
+ # The functions 'sum', 'prod', etc allow specifying axis=0
+ # even for scalars
+ assert_equal(np.sum(3, axis=0), 3)
+ assert_equal(np.prod(3.5, axis=0), 3.5)
+ assert_equal(np.any(True, axis=0), True)
+ assert_equal(np.all(False, axis=0), False)
+ assert_equal(np.max(3, axis=0), 3)
+ assert_equal(np.min(2.5, axis=0), 2.5)
+
+ # Check scalar behaviour for ufuncs without an identity
+ assert_equal(np.power.reduce(3), 3)
+
+ # Make sure that scalars are coming out from this operation
+ assert_(type(np.prod(np.float32(2.5), axis=0)) is np.float32)
+ assert_(type(np.sum(np.float32(2.5), axis=0)) is np.float32)
+ assert_(type(np.max(np.float32(2.5), axis=0)) is np.float32)
+ assert_(type(np.min(np.float32(2.5), axis=0)) is np.float32)
+
+ # check if scalars/0-d arrays get cast
+ assert_(type(np.any(0, axis=0)) is np.bool_)
+
+ # assert that 0-d arrays get wrapped
+ class MyArray(np.ndarray):
+ pass
+ a = np.array(1).view(MyArray)
+ assert_(type(np.any(a)) is MyArray)
+
+ def test_casting_out_param(self):
+ # Test that it's possible to do casts on output
+ a = np.ones((200, 100), np.int64)
+ b = np.ones((200, 100), np.int64)
+ c = np.ones((200, 100), np.float64)
+ np.add(a, b, out=c)
+ assert_equal(c, 2)
+
+ a = np.zeros(65536)
+ b = np.zeros(65536, dtype=np.float32)
+ np.subtract(a, 0, out=b)
+ assert_equal(b, 0)
+
+ def test_where_param(self):
+ # Test that the where= ufunc parameter works with regular arrays
+ a = np.arange(7)
+ b = np.ones(7)
+ c = np.zeros(7)
+ np.add(a, b, out=c, where=(a % 2 == 1))
+ assert_equal(c, [0, 2, 0, 4, 0, 6, 0])
+
+ a = np.arange(4).reshape(2, 2) + 2
+ np.power(a, [2, 3], out=a, where=[[0, 1], [1, 0]])
+ assert_equal(a, [[2, 27], [16, 5]])
+ # Broadcasting the where= parameter
+ np.subtract(a, 2, out=a, where=[True, False])
+ assert_equal(a, [[0, 27], [14, 5]])
+
+ def test_where_param_buffer_output(self):
+ # This test is temporarily skipped because it requires
+ # adding masking features to the nditer to work properly
+
+ # With casting on output
+ a = np.ones(10, np.int64)
+ b = np.ones(10, np.int64)
+ c = 1.5 * np.ones(10, np.float64)
+ np.add(a, b, out=c, where=[1, 0, 0, 1, 0, 0, 1, 1, 1, 0])
+ assert_equal(c, [2, 1.5, 1.5, 2, 1.5, 1.5, 2, 2, 2, 1.5])
+
+ def test_where_param_alloc(self):
+ # With casting and allocated output
+ a = np.array([1], dtype=np.int64)
+ m = np.array([True], dtype=bool)
+ assert_equal(np.sqrt(a, where=m), [1])
+
+ # No casting and allocated output
+ a = np.array([1], dtype=np.float64)
+ m = np.array([True], dtype=bool)
+ assert_equal(np.sqrt(a, where=m), [1])
+
+ def check_identityless_reduction(self, a):
+ # np.minimum.reduce is an identityless reduction
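+ # (no identity means np.minimum.reduce([]) raises ValueError, unlike
+ # np.add.reduce([]) which returns the identity 0)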
+
+ # Verify that it sees the zero at various positions
+ a[...] = 1
+ a[1, 0, 0] = 0
+ assert_equal(np.minimum.reduce(a, axis=None), 0)
+ assert_equal(np.minimum.reduce(a, axis=(0, 1)), [0, 1, 1, 1])
+ assert_equal(np.minimum.reduce(a, axis=(0, 2)), [0, 1, 1])
+ assert_equal(np.minimum.reduce(a, axis=(1, 2)), [1, 0])
+ assert_equal(np.minimum.reduce(a, axis=0),
+ [[0, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]])
+ assert_equal(np.minimum.reduce(a, axis=1),
+ [[1, 1, 1, 1], [0, 1, 1, 1]])
+ assert_equal(np.minimum.reduce(a, axis=2),
+ [[1, 1, 1], [0, 1, 1]])
+ assert_equal(np.minimum.reduce(a, axis=()), a)
+
+ a[...] = 1
+ a[0, 1, 0] = 0
+ assert_equal(np.minimum.reduce(a, axis=None), 0)
+ assert_equal(np.minimum.reduce(a, axis=(0, 1)), [0, 1, 1, 1])
+ assert_equal(np.minimum.reduce(a, axis=(0, 2)), [1, 0, 1])
+ assert_equal(np.minimum.reduce(a, axis=(1, 2)), [0, 1])
+ assert_equal(np.minimum.reduce(a, axis=0),
+ [[1, 1, 1, 1], [0, 1, 1, 1], [1, 1, 1, 1]])
+ assert_equal(np.minimum.reduce(a, axis=1),
+ [[0, 1, 1, 1], [1, 1, 1, 1]])
+ assert_equal(np.minimum.reduce(a, axis=2),
+ [[1, 0, 1], [1, 1, 1]])
+ assert_equal(np.minimum.reduce(a, axis=()), a)
+
+ a[...] = 1
+ a[0, 0, 1] = 0
+ assert_equal(np.minimum.reduce(a, axis=None), 0)
+ assert_equal(np.minimum.reduce(a, axis=(0, 1)), [1, 0, 1, 1])
+ assert_equal(np.minimum.reduce(a, axis=(0, 2)), [0, 1, 1])
+ assert_equal(np.minimum.reduce(a, axis=(1, 2)), [0, 1])
+ assert_equal(np.minimum.reduce(a, axis=0),
+ [[1, 0, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]])
+ assert_equal(np.minimum.reduce(a, axis=1),
+ [[1, 0, 1, 1], [1, 1, 1, 1]])
+ assert_equal(np.minimum.reduce(a, axis=2),
+ [[0, 1, 1], [1, 1, 1]])
+ assert_equal(np.minimum.reduce(a, axis=()), a)
+
+ def test_identityless_reduction_corder(self):
+ a = np.empty((2, 3, 4), order='C')
+ self.check_identityless_reduction(a)
+
+ def test_identityless_reduction_forder(self):
+ a = np.empty((2, 3, 4), order='F')
+ self.check_identityless_reduction(a)
+
+ def test_identityless_reduction_otherorder(self):
+ a = np.empty((2, 4, 3), order='C').swapaxes(1, 2)
+ self.check_identityless_reduction(a)
+
+ def test_identityless_reduction_noncontig(self):
+ a = np.empty((3, 5, 4), order='C').swapaxes(1, 2)
+ a = a[1:, 1:, 1:]
+ self.check_identityless_reduction(a)
+
+ def test_identityless_reduction_noncontig_unaligned(self):
+ a = np.empty((3*4*5*8 + 1,), dtype='i1')
+ a = a[1:].view(dtype='f8')
+ a.shape = (3, 4, 5)
+ a = a[1:, 1:, 1:]
+ self.check_identityless_reduction(a)
+
+ def test_initial_reduction(self):
+ # np.minimum.reduce is an identityless reduction
+
+ # For cases like np.maximum(np.abs(...), initial=0)
+ # More generally, a supremum over non-negative numbers.
+ assert_equal(np.maximum.reduce([], initial=0), 0)
+
+ # For cases like reduction of an empty array over the reals.
+ assert_equal(np.minimum.reduce([], initial=np.inf), np.inf)
+ assert_equal(np.maximum.reduce([], initial=-np.inf), -np.inf)
+
+ # Random tests
+ assert_equal(np.minimum.reduce([5], initial=4), 4)
+ assert_equal(np.maximum.reduce([4], initial=5), 5)
+ assert_equal(np.maximum.reduce([5], initial=4), 5)
+ assert_equal(np.minimum.reduce([4], initial=5), 4)
+
+ # Check initial=None raises ValueError for both types of ufunc reductions
+ assert_raises(ValueError, np.minimum.reduce, [], initial=None)
+ assert_raises(ValueError, np.add.reduce, [], initial=None)
+
+ # Check that np._NoValue gives default behavior.
+ assert_equal(np.add.reduce([], initial=np._NoValue), 0)
+
+ # Check that initial kwarg behaves as intended for dtype=object
+ a = np.array([10], dtype=object)
+ res = np.add.reduce(a, initial=5)
+ assert_equal(res, 15)
+
+ def test_identityless_reduction_nonreorderable(self):
+ a = np.array([[8.0, 2.0, 2.0], [1.0, 0.5, 0.25]])
+
+ res = np.divide.reduce(a, axis=0)
+ assert_equal(res, [8.0, 4.0, 8.0])
+
+ res = np.divide.reduce(a, axis=1)
+ assert_equal(res, [2.0, 8.0])
+
+ res = np.divide.reduce(a, axis=())
+ assert_equal(res, a)
+
+ assert_raises(ValueError, np.divide.reduce, a, axis=(0, 1))
+
+ def test_reduce_zero_axis(self):
+ # If we have a n x m array and do a reduction with axis=1, then we are
+ # doing n reductions, and each reduction takes an m-element array. For
+ # a reduction operation without an identity, then:
+ # n > 0, m > 0: fine
+ # n = 0, m > 0: fine, doing 0 reductions of m-element arrays
+ # n > 0, m = 0: can't reduce a 0-element array, ValueError
+ # n = 0, m = 0: can't reduce a 0-element array, ValueError (for
+ # consistency with the above case)
+ # This test doesn't actually look at return values, it just checks to
+ # make sure that we get an error in exactly those cases where we
+ # expect one, and assumes the calculations themselves are done
+ # correctly.
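+ # (e.g. np.maximum.reduce(np.zeros((0, 3)), axis=1) returns an empty
+ # result, but np.maximum.reduce(np.zeros((3, 0)), axis=1) raises,
+ # since each of its 3 reductions sees 0 elements)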
+
+ def ok(f, *args, **kwargs):
+ f(*args, **kwargs)
+
+ def err(f, *args, **kwargs):
+ assert_raises(ValueError, f, *args, **kwargs)
+
+ def t(expect, func, n, m):
+ expect(func, np.zeros((n, m)), axis=1)
+ expect(func, np.zeros((m, n)), axis=0)
+ expect(func, np.zeros((n // 2, n // 2, m)), axis=2)
+ expect(func, np.zeros((n // 2, m, n // 2)), axis=1)
+ expect(func, np.zeros((n, m // 2, m // 2)), axis=(1, 2))
+ expect(func, np.zeros((m // 2, n, m // 2)), axis=(0, 2))
+ expect(func, np.zeros((m // 3, m // 3, m // 3,
+ n // 2, n // 2)),
+ axis=(0, 1, 2))
+ # Check what happens if the inner (resp. outer) dimensions are a
+ # mix of zero and non-zero:
+ expect(func, np.zeros((10, m, n)), axis=(0, 1))
+ expect(func, np.zeros((10, n, m)), axis=(0, 2))
+ expect(func, np.zeros((m, 10, n)), axis=0)
+ expect(func, np.zeros((10, m, n)), axis=1)
+ expect(func, np.zeros((10, n, m)), axis=2)
+
+ # np.maximum is just an arbitrary ufunc with no reduction identity
+ assert_equal(np.maximum.identity, None)
+ t(ok, np.maximum.reduce, 30, 30)
+ t(ok, np.maximum.reduce, 0, 30)
+ t(err, np.maximum.reduce, 30, 0)
+ t(err, np.maximum.reduce, 0, 0)
+ err(np.maximum.reduce, [])
+ np.maximum.reduce(np.zeros((0, 0)), axis=())
+
+ # all of the combinations are fine for a reduction that has an
+ # identity
+ t(ok, np.add.reduce, 30, 30)
+ t(ok, np.add.reduce, 0, 30)
+ t(ok, np.add.reduce, 30, 0)
+ t(ok, np.add.reduce, 0, 0)
+ np.add.reduce([])
+ np.add.reduce(np.zeros((0, 0)), axis=())
+
+ # OTOH, accumulate always makes sense for any combination of n and m,
+ # because it maps an m-element array to an m-element array. These
+ # tests are simpler because accumulate doesn't accept multiple axes.
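+ # (e.g. np.maximum.accumulate(np.zeros((0, 30)), axis=0) returns an
+ # empty (0, 30) array; no identity is ever needed)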
+ for uf in (np.maximum, np.add):
+ uf.accumulate(np.zeros((30, 0)), axis=0)
+ uf.accumulate(np.zeros((0, 30)), axis=0)
+ uf.accumulate(np.zeros((30, 30)), axis=0)
+ uf.accumulate(np.zeros((0, 0)), axis=0)
+
+ def test_safe_casting(self):
+ # In old versions of numpy, in-place operations used the 'unsafe'
+ # casting rules. In versions >= 1.10, 'same_kind' is the
+ # default and an exception is raised instead of a warning when
+ # 'same_kind' is not satisfied.
+ a = np.array([1, 2, 3], dtype=int)
+ # Non-in-place addition is fine
+ assert_array_equal(assert_no_warnings(np.add, a, 1.1),
+ [2.1, 3.1, 4.1])
+ assert_raises(TypeError, np.add, a, 1.1, out=a)
+
+ def add_inplace(a, b):
+ a += b
+
+ assert_raises(TypeError, add_inplace, a, 1.1)
+ # Make sure that explicitly overriding the exception is allowed:
+ assert_no_warnings(np.add, a, 1.1, out=a, casting="unsafe")
+ assert_array_equal(a, [2, 3, 4])
+
+ def test_ufunc_custom_out(self):
+ # Test ufunc with built in input types and custom output type
+
+ a = np.array([0, 1, 2], dtype='i8')
+ b = np.array([0, 1, 2], dtype='i8')
+ c = np.empty(3, dtype=_rational_tests.rational)
+
+ # Output must be specified so numpy knows what
+ # ufunc signature to look for
+ result = _rational_tests.test_add(a, b, c)
+ target = np.array([0, 2, 4], dtype=_rational_tests.rational)
+ assert_equal(result, target)
+
+ # no output type should raise TypeError
+ with assert_raises(TypeError):
+ _rational_tests.test_add(a, b)
+
+ def test_operand_flags(self):
+ a = np.arange(16, dtype='l').reshape(4, 4)
+ b = np.arange(9, dtype='l').reshape(3, 3)
+ opflag_tests.inplace_add(a[:-1, :-1], b)
+ assert_equal(a, np.array([[0, 2, 4, 3], [7, 9, 11, 7],
+ [14, 16, 18, 11], [12, 13, 14, 15]], dtype='l'))
+
+ a = np.array(0)
+ opflag_tests.inplace_add(a, 3)
+ assert_equal(a, 3)
+ opflag_tests.inplace_add(a, [3, 4])
+ assert_equal(a, 10)
+
+ def test_struct_ufunc(self):
+ import numpy.core._struct_ufunc_tests as struct_ufunc
+
+ a = np.array([(1, 2, 3)], dtype='u8,u8,u8')
+ b = np.array([(1, 2, 3)], dtype='u8,u8,u8')
+
+ result = struct_ufunc.add_triplet(a, b)
+ assert_equal(result, np.array([(2, 4, 6)], dtype='u8,u8,u8'))
+
+ def test_custom_ufunc(self):
+ a = np.array(
+ [_rational_tests.rational(1, 2),
+ _rational_tests.rational(1, 3),
+ _rational_tests.rational(1, 4)],
+ dtype=_rational_tests.rational)
+ b = np.array(
+ [_rational_tests.rational(1, 2),
+ _rational_tests.rational(1, 3),
+ _rational_tests.rational(1, 4)],
+ dtype=_rational_tests.rational)
+
+ result = _rational_tests.test_add_rationals(a, b)
+ expected = np.array(
+ [_rational_tests.rational(1),
+ _rational_tests.rational(2, 3),
+ _rational_tests.rational(1, 2)],
+ dtype=_rational_tests.rational)
+ assert_equal(result, expected)
+
+ def test_custom_ufunc_forced_sig(self):
+ # gh-9351 - looking for a non-first userloop would previously hang
+ with assert_raises(TypeError):
+ np.multiply(_rational_tests.rational(1), 1,
+ signature=(_rational_tests.rational, int, None))
+
+ def test_custom_array_like(self):
+
+ class MyThing(object):
+ __array_priority__ = 1000
+
+ rmul_count = 0
+ getitem_count = 0
+
+ def __init__(self, shape):
+ self.shape = shape
+
+ def __len__(self):
+ return self.shape[0]
+
+ def __getitem__(self, i):
+ MyThing.getitem_count += 1
+ if not isinstance(i, tuple):
+ i = (i,)
+ if len(i) > self.ndim:
+ raise IndexError("boo")
+
+ return MyThing(self.shape[len(i):])
+
+ def __rmul__(self, other):
+ MyThing.rmul_count += 1
+ return self
+
+ np.float64(5)*MyThing((3, 3))
+ assert_(MyThing.rmul_count == 1, MyThing.rmul_count)
+ assert_(MyThing.getitem_count <= 2, MyThing.getitem_count)
+
+ def test_inplace_fancy_indexing(self):
+
+ a = np.arange(10)
+ np.add.at(a, [2, 5, 2], 1)
+ assert_equal(a, [0, 1, 4, 3, 4, 6, 6, 7, 8, 9])
+
+ a = np.arange(10)
+ b = np.array([100, 100, 100])
+ np.add.at(a, [2, 5, 2], b)
+ assert_equal(a, [0, 1, 202, 3, 4, 105, 6, 7, 8, 9])
+
+ a = np.arange(9).reshape(3, 3)
+ b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]])
+ np.add.at(a, (slice(None), [1, 2, 1]), b)
+ assert_equal(a, [[0, 201, 102], [3, 404, 205], [6, 607, 308]])
+
+ a = np.arange(27).reshape(3, 3, 3)
+ b = np.array([100, 200, 300])
+ np.add.at(a, (slice(None), slice(None), [1, 2, 1]), b)
+ assert_equal(a,
+ [[[0, 401, 202],
+ [3, 404, 205],
+ [6, 407, 208]],
+
+ [[9, 410, 211],
+ [12, 413, 214],
+ [15, 416, 217]],
+
+ [[18, 419, 220],
+ [21, 422, 223],
+ [24, 425, 226]]])
+
+ a = np.arange(9).reshape(3, 3)
+ b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]])
+ np.add.at(a, ([1, 2, 1], slice(None)), b)
+ assert_equal(a, [[0, 1, 2], [403, 404, 405], [206, 207, 208]])
+
+ a = np.arange(27).reshape(3, 3, 3)
+ b = np.array([100, 200, 300])
+ np.add.at(a, (slice(None), [1, 2, 1], slice(None)), b)
+ assert_equal(a,
+ [[[0, 1, 2],
+ [203, 404, 605],
+ [106, 207, 308]],
+
+ [[9, 10, 11],
+ [212, 413, 614],
+ [115, 216, 317]],
+
+ [[18, 19, 20],
+ [221, 422, 623],
+ [124, 225, 326]]])
+
+ a = np.arange(9).reshape(3, 3)
+ b = np.array([100, 200, 300])
+ np.add.at(a, (0, [1, 2, 1]), b)
+ assert_equal(a, [[0, 401, 202], [3, 4, 5], [6, 7, 8]])
+
+ a = np.arange(27).reshape(3, 3, 3)
+ b = np.array([100, 200, 300])
+ np.add.at(a, ([1, 2, 1], 0, slice(None)), b)
+ assert_equal(a,
+ [[[0, 1, 2],
+ [3, 4, 5],
+ [6, 7, 8]],
+
+ [[209, 410, 611],
+ [12, 13, 14],
+ [15, 16, 17]],
+
+ [[118, 219, 320],
+ [21, 22, 23],
+ [24, 25, 26]]])
+
+ a = np.arange(27).reshape(3, 3, 3)
+ b = np.array([100, 200, 300])
+ np.add.at(a, (slice(None), slice(None), slice(None)), b)
+ assert_equal(a,
+ [[[100, 201, 302],
+ [103, 204, 305],
+ [106, 207, 308]],
+
+ [[109, 210, 311],
+ [112, 213, 314],
+ [115, 216, 317]],
+
+ [[118, 219, 320],
+ [121, 222, 323],
+ [124, 225, 326]]])
+
+ a = np.arange(10)
+ np.negative.at(a, [2, 5, 2])
+ assert_equal(a, [0, 1, 2, 3, 4, -5, 6, 7, 8, 9])
+
+ # Test 0-dim array
+ a = np.array(0)
+ np.add.at(a, (), 1)
+ assert_equal(a, 1)
+
+ assert_raises(IndexError, np.add.at, a, 0, 1)
+ assert_raises(IndexError, np.add.at, a, [], 1)
+
+ # Test mixed dtypes
+ a = np.arange(10)
+ np.power.at(a, [1, 2, 3, 2], 3.5)
+ assert_equal(a, np.array([0, 1, 4414, 46, 4, 5, 6, 7, 8, 9]))
+
+ # Test boolean indexing and boolean ufuncs
+ a = np.arange(10)
+ index = a % 2 == 0
+ np.equal.at(a, index, [0, 2, 4, 6, 8])
+ assert_equal(a, [1, 1, 1, 3, 1, 5, 1, 7, 1, 9])
+
+ # Test unary operator
+ a = np.arange(10, dtype='u4')
+ np.invert.at(a, [2, 5, 2])
+ assert_equal(a, [0, 1, 2, 3, 4, 5 ^ 0xffffffff, 6, 7, 8, 9])
+
+ # Test empty subspace
+ orig = np.arange(4)
+ a = orig[:, None][:, 0:0]
+ np.add.at(a, [0, 1], 3)
+ assert_array_equal(orig, np.arange(4))
+
+ # Test with swapped byte order
+ index = np.array([1, 2, 1], np.dtype('i').newbyteorder())
+ values = np.array([1, 2, 3, 4], np.dtype('f').newbyteorder())
+ np.add.at(values, index, 3)
+ assert_array_equal(values, [1, 8, 6, 4])
+
+ # Test exception thrown
+ values = np.array(['a', 1], dtype=object)
+ assert_raises(TypeError, np.add.at, values, [0, 1], 1)
+ assert_array_equal(values, np.array(['a', 1], dtype=object))
+
+ # Test multiple output ufuncs raise error, gh-5665
+ assert_raises(ValueError, np.modf.at, np.arange(10), [1])
+
+ def test_reduce_arguments(self):
+ f = np.add.reduce
+ d = np.ones((5,2), dtype=int)
+ o = np.ones((2,), dtype=d.dtype)
+ r = o * 5
+ assert_equal(f(d), r)
+ # a, axis=0, dtype=None, out=None, keepdims=False
+ assert_equal(f(d, axis=0), r)
+ assert_equal(f(d, 0), r)
+ assert_equal(f(d, 0, dtype=None), r)
+ assert_equal(f(d, 0, dtype='i'), r)
+ assert_equal(f(d, 0, 'i'), r)
+ assert_equal(f(d, 0, None), r)
+ assert_equal(f(d, 0, None, out=None), r)
+ assert_equal(f(d, 0, None, out=o), r)
+ assert_equal(f(d, 0, None, o), r)
+ assert_equal(f(d, 0, None, None), r)
+ assert_equal(f(d, 0, None, None, keepdims=False), r)
+ assert_equal(f(d, 0, None, None, True), r.reshape((1,) + r.shape))
+ assert_equal(f(d, 0, None, None, False, 0), r)
+ assert_equal(f(d, 0, None, None, False, initial=0), r)
+ # multiple keywords
+ assert_equal(f(d, axis=0, dtype=None, out=None, keepdims=False), r)
+ assert_equal(f(d, 0, dtype=None, out=None, keepdims=False), r)
+ assert_equal(f(d, 0, None, out=None, keepdims=False), r)
+ assert_equal(f(d, 0, None, out=None, keepdims=False, initial=0), r)
+
+ # too little
+ assert_raises(TypeError, f)
+ # too much
+ assert_raises(TypeError, f, d, 0, None, None, False, 0, 1)
+ # invalid axis
+ assert_raises(TypeError, f, d, "invalid")
+ assert_raises(TypeError, f, d, axis="invalid")
+ assert_raises(TypeError, f, d, axis="invalid", dtype=None,
+ keepdims=True)
+ # invalid dtype
+ assert_raises(TypeError, f, d, 0, "invalid")
+ assert_raises(TypeError, f, d, dtype="invalid")
+ assert_raises(TypeError, f, d, dtype="invalid", out=None)
+ # invalid out
+ assert_raises(TypeError, f, d, 0, None, "invalid")
+ assert_raises(TypeError, f, d, out="invalid")
+ assert_raises(TypeError, f, d, out="invalid", dtype=None)
+ # keepdims boolean, no invalid value
+ # assert_raises(TypeError, f, d, 0, None, None, "invalid")
+ # assert_raises(TypeError, f, d, keepdims="invalid", axis=0, dtype=None)
+ # invalid mix
+ assert_raises(TypeError, f, d, 0, keepdims="invalid", dtype="invalid",
+ out=None)
+
+ # invalid keyword
+ assert_raises(TypeError, f, d, axis=0, dtype=None, invalid=0)
+ assert_raises(TypeError, f, d, invalid=0)
+ assert_raises(TypeError, f, d, 0, keepdims=True, invalid="invalid",
+ out=None)
+ assert_raises(TypeError, f, d, axis=0, dtype=None, keepdims=True,
+ out=None, invalid=0)
+ assert_raises(TypeError, f, d, axis=0, dtype=None,
+ out=None, invalid=0)
+
+ def test_structured_equal(self):
+ # https://github.com/numpy/numpy/issues/4855
+
+ class MyA(np.ndarray):
+ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+ return getattr(ufunc, method)(*(input.view(np.ndarray)
+ for input in inputs), **kwargs)
+ a = np.arange(12.).reshape(4,3)
+ ra = a.view(dtype=('f8,f8,f8')).squeeze()
+ mra = ra.view(MyA)
+
+ target = np.array([ True, False, False, False], dtype=bool)
+ assert_equal(np.all(target == (mra == ra[0])), True)
+
+ def test_scalar_equal(self):
+ # Scalar comparisons should always work, without deprecation warnings,
+ # even when the ufunc fails.
+ a = np.array(0.)
+ b = np.array('a')
+ assert_(a != b)
+ assert_(b != a)
+ assert_(not (a == b))
+ assert_(not (b == a))
+
+ def test_NotImplemented_not_returned(self):
+ # See gh-5964 and gh-2091. Some of these functions are not operator
+ # related and were fixed for other reasons in the past.
+ binary_funcs = [
+ np.power, np.add, np.subtract, np.multiply, np.divide,
+ np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or,
+ np.bitwise_xor, np.left_shift, np.right_shift, np.fmax,
+ np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2,
+ np.logical_and, np.logical_or, np.logical_xor, np.maximum,
+ np.minimum, np.mod,
+ np.greater, np.greater_equal, np.less, np.less_equal,
+ np.equal, np.not_equal]
+
+ a = np.array('1')
+ b = 1
+ c = np.array([1., 2.])
+ for f in binary_funcs:
+ assert_raises(TypeError, f, a, b)
+ assert_raises(TypeError, f, c, a)
+
+ def test_reduce_noncontig_output(self):
+ # Check that reduction deals with non-contiguous output arrays
+ # appropriately.
+ #
+ # gh-8036
+
+ x = np.arange(7*13*8, dtype=np.int16).reshape(7, 13, 8)
+ x = x[4:6,1:11:6,1:5].transpose(1, 2, 0)
+ y_base = np.arange(4*4, dtype=np.int16).reshape(4, 4)
+ y = y_base[::2,:]
+
+ y_base_copy = y_base.copy()
+
+ r0 = np.add.reduce(x, out=y.copy(), axis=2)
+ r1 = np.add.reduce(x, out=y, axis=2)
+
+ # The results should match, and y_base shouldn't get clobbered
+ assert_equal(r0, r1)
+ assert_equal(y_base[1,:], y_base_copy[1,:])
+ assert_equal(y_base[3,:], y_base_copy[3,:])
+
+ def test_no_doc_string(self):
+ # gh-9337
+ assert_('\n' not in umt.inner1d_no_doc.__doc__)
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_ufunc.pyc b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_ufunc.pyc
new file mode 100644
index 0000000..01d6f07
Binary files /dev/null and b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_ufunc.pyc differ
diff --git a/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_umath.py b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_umath.py
new file mode 100644
index 0000000..eb6a67f
--- /dev/null
+++ b/project/venv/lib/python2.7/site-packages/numpy/core/tests/test_umath.py
@@ -0,0 +1,2920 @@
+from __future__ import division, absolute_import, print_function
+
+import platform
+import warnings
+import fnmatch
+import itertools
+import pytest
+
+import numpy.core.umath as ncu
+from numpy.core import _umath_tests as ncu_tests
+import numpy as np
+from numpy.testing import (
+ assert_, assert_equal, assert_raises, assert_raises_regex,
+ assert_array_equal, assert_almost_equal, assert_array_almost_equal,
+ assert_allclose, assert_no_warnings, suppress_warnings,
+ _gen_alignment_data
+ )
+
+
+def on_powerpc():
+ """ True if we are running on a Power PC platform."""
+ return platform.processor() == 'powerpc' or \
+ platform.machine().startswith('ppc')
+
+
+class _FilterInvalids(object):
+ def setup(self):
+ self.olderr = np.seterr(invalid='ignore')
+
+ def teardown(self):
+ np.seterr(**self.olderr)
+
+
+class TestConstants(object):
+ def test_pi(self):
+ assert_allclose(ncu.pi, 3.141592653589793, 1e-15)
+
+ def test_e(self):
+ assert_allclose(ncu.e, 2.718281828459045, 1e-15)
+
+ def test_euler_gamma(self):
+ assert_allclose(ncu.euler_gamma, 0.5772156649015329, 1e-15)
+
+
+class TestOut(object):
+ def test_out_subok(self):
+ for subok in (True, False):
+ a = np.array(0.5)
+ o = np.empty(())
+
+ r = np.add(a, 2, o, subok=subok)
+ assert_(r is o)
+ r = np.add(a, 2, out=o, subok=subok)
+ assert_(r is o)
+ r = np.add(a, 2, out=(o,), subok=subok)
+ assert_(r is o)
+
+ d = np.array(5.7)
+ o1 = np.empty(())
+ o2 = np.empty((), dtype=np.int32)
+
+ r1, r2 = np.frexp(d, o1, None, subok=subok)
+ assert_(r1 is o1)
+ r1, r2 = np.frexp(d, None, o2, subok=subok)
+ assert_(r2 is o2)
+ r1, r2 = np.frexp(d, o1, o2, subok=subok)
+ assert_(r1 is o1)
+ assert_(r2 is o2)
+
+ r1, r2 = np.frexp(d, out=(o1, None), subok=subok)
+ assert_(r1 is o1)
+ r1, r2 = np.frexp(d, out=(None, o2), subok=subok)
+ assert_(r2 is o2)
+ r1, r2 = np.frexp(d, out=(o1, o2), subok=subok)
+ assert_(r1 is o1)
+ assert_(r2 is o2)
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', DeprecationWarning)
+ r1, r2 = np.frexp(d, out=o1, subok=subok)
+ assert_(r1 is o1)
+ assert_(w[0].category is DeprecationWarning)
+
+ assert_raises(ValueError, np.add, a, 2, o, o, subok=subok)
+ assert_raises(ValueError, np.add, a, 2, o, out=o, subok=subok)
+ assert_raises(ValueError, np.add, a, 2, None, out=o, subok=subok)
+ assert_raises(ValueError, np.add, a, 2, out=(o, o), subok=subok)
+ assert_raises(ValueError, np.add, a, 2, out=(), subok=subok)
+ assert_raises(TypeError, np.add, a, 2, [], subok=subok)
+ assert_raises(TypeError, np.add, a, 2, out=[], subok=subok)
+ assert_raises(TypeError, np.add, a, 2, out=([],), subok=subok)
+ o.flags.writeable = False
+ assert_raises(ValueError, np.add, a, 2, o, subok=subok)
+ assert_raises(ValueError, np.add, a, 2, out=o, subok=subok)
+ assert_raises(ValueError, np.add, a, 2, out=(o,), subok=subok)
+
+ def test_out_wrap_subok(self):
+ class ArrayWrap(np.ndarray):
+ __array_priority__ = 10
+
+ def __new__(cls, arr):
+ return np.asarray(arr).view(cls).copy()
+
+ def __array_wrap__(self, arr, context):
+ return arr.view(type(self))
+
+ for subok in (True, False):
+ a = ArrayWrap([0.5])
+
+ r = np.add(a, 2, subok=subok)
+ if subok:
+ assert_(isinstance(r, ArrayWrap))
+ else:
+ assert_(type(r) == np.ndarray)
+
+ r = np.add(a, 2, None, subok=subok)
+ if subok:
+ assert_(isinstance(r, ArrayWrap))
+ else:
+ assert_(type(r) == np.ndarray)
+
+ r = np.add(a, 2, out=None, subok=subok)
+ if subok:
+ assert_(isinstance(r, ArrayWrap))
+ else:
+ assert_(type(r) == np.ndarray)
+
+ r = np.add(a, 2, out=(None,), subok=subok)
+ if subok:
+ assert_(isinstance(r, ArrayWrap))
+ else:
+ assert_(type(r) == np.ndarray)
+
+ d = ArrayWrap([5.7])
+ o1 = np.empty((1,))
+ o2 = np.empty((1,), dtype=np.int32)
+
+ r1, r2 = np.frexp(d, o1, subok=subok)
+ if subok:
+ assert_(isinstance(r2, ArrayWrap))
+ else:
+ assert_(type(r2) == np.ndarray)
+
+ r1, r2 = np.frexp(d, o1, None, subok=subok)
+ if subok:
+ assert_(isinstance(r2, ArrayWrap))
+ else:
+ assert_(type(r2) == np.ndarray)
+
+ r1, r2 = np.frexp(d, None, o2, subok=subok)
+ if subok:
+ assert_(isinstance(r1, ArrayWrap))
+ else:
+ assert_(type(r1) == np.ndarray)
+
+ r1, r2 = np.frexp(d, out=(o1, None), subok=subok)
+ if subok:
+ assert_(isinstance(r2, ArrayWrap))
+ else:
+ assert_(type(r2) == np.ndarray)
+
+ r1, r2 = np.frexp(d, out=(None, o2), subok=subok)
+ if subok:
+ assert_(isinstance(r1, ArrayWrap))
+ else:
+ assert_(type(r1) == np.ndarray)
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', DeprecationWarning)
+ r1, r2 = np.frexp(d, out=o1, subok=subok)
+ if subok:
+ assert_(isinstance(r2, ArrayWrap))
+ else:
+ assert_(type(r2) == np.ndarray)
+ assert_(w[0].category is DeprecationWarning)
+
+
+class TestComparisons(object):
+ def test_ignore_object_identity_in_equal(self):
+ # Check error raised when comparing identical objects whose comparison
+ # is not a simple boolean, e.g., arrays that are compared elementwise.
+ a = np.array([np.array([1, 2, 3]), None], dtype=object)
+ assert_raises(ValueError, np.equal, a, a)
+
+ # Check error raised when comparing identical non-comparable objects.
+ class FunkyType(object):
+ def __eq__(self, other):
+ raise TypeError("I won't compare")
+
+ a = np.array([FunkyType()])
+ assert_raises(TypeError, np.equal, a, a)
+
+ # Check identity doesn't override comparison mismatch.
+ a = np.array([np.nan], dtype=object)
+ assert_equal(np.equal(a, a), [False])
+
+ def test_ignore_object_identity_in_not_equal(self):
+ # Check error raised when comparing identical objects whose comparison
+ # is not a simple boolean, e.g., arrays that are compared elementwise.
+ a = np.array([np.array([1, 2, 3]), None], dtype=object)
+ assert_raises(ValueError, np.not_equal, a, a)
+
+ # Check error raised when comparing identical non-comparable objects.
+ class FunkyType(object):
+ def __ne__(self, other):
+ raise TypeError("I won't compare")
+
+ a = np.array([FunkyType()])
+ assert_raises(TypeError, np.not_equal, a, a)
+
+ # Check identity doesn't override comparison mismatch.
+ a = np.array([np.nan], dtype=object)
+ assert_equal(np.not_equal(a, a), [True])
+
+
+class TestAdd(object):
+ def test_reduce_alignment(self):
+ # gh-9876
+ # make sure arrays with weird strides work with the optimizations in
+ # pairwise_sum_@TYPE@. On x86, the 'b' field will count as aligned at a
+ # 4 byte offset, even though its itemsize is 8.
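+ # (the packed struct dtype places 'b' at byte offset 4 in a 12-byte
+ # record, so successive 'b' elements sit at offsets 4, 16, 28, ...)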
+ a = np.zeros(2, dtype=[('a', np.int32), ('b', np.float64)])
+ a['a'] = -1
+ assert_equal(a['b'].sum(), 0)
+
+
+class TestDivision(object):
+ def test_division_int(self):
+ # int division should follow Python
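+ # (Python floor-divides toward -inf: -120 // 100 == -2, -120 % 100 == 80)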
+ x = np.array([5, 10, 90, 100, -5, -10, -90, -100, -120])
+ if 5 / 10 == 0.5:
+ assert_equal(x / 100, [0.05, 0.1, 0.9, 1,
+ -0.05, -0.1, -0.9, -1, -1.2])
+ else:
+ assert_equal(x / 100, [0, 0, 0, 1, -1, -1, -1, -1, -2])
+ assert_equal(x // 100, [0, 0, 0, 1, -1, -1, -1, -1, -2])
+ assert_equal(x % 100, [5, 10, 90, 0, 95, 90, 10, 0, 80])
+
+ def test_division_complex(self):
+ # check that implementation is correct
+ msg = "Complex division implementation check"
+ x = np.array([1. + 1.*1j, 1. + .5*1j, 1. + 2.*1j], dtype=np.complex128)
+ assert_almost_equal(x**2/x, x, err_msg=msg)
+ # check overflow, underflow
+ msg = "Complex division overflow/underflow check"
+ x = np.array([1.e+110, 1.e-110], dtype=np.complex128)
+ y = x**2/x
+ assert_almost_equal(y/x, [1, 1], err_msg=msg)
+
+ def test_zero_division_complex(self):
+ with np.errstate(invalid="ignore", divide="ignore"):
+ x = np.array([0.0], dtype=np.complex128)
+ y = 1.0/x
+ assert_(np.isinf(y)[0])
+ y = complex(np.inf, np.nan)/x
+ assert_(np.isinf(y)[0])
+ y = complex(np.nan, np.inf)/x
+ assert_(np.isinf(y)[0])
+ y = complex(np.inf, np.inf)/x
+ assert_(np.isinf(y)[0])
+ y = 0.0/x
+ assert_(np.isnan(y)[0])
+
+ def test_floor_division_complex(self):
+ # check that implementation is correct
+ msg = "Complex floor division implementation check"
+ x = np.array([.9 + 1j, -.1 + 1j, .9 + .5*1j, .9 + 2.*1j], dtype=np.complex128)
+ y = np.array([0., -1., 0., 0.], dtype=np.complex128)
+ assert_equal(np.floor_divide(x**2, x), y, err_msg=msg)
+ # check overflow, underflow
+ msg = "Complex floor division overflow/underflow check"
+ x = np.array([1.e+110, 1.e-110], dtype=np.complex128)
+ y = np.floor_divide(x**2, x)
+ assert_equal(y, [1.e+110, 0], err_msg=msg)
+
+ def test_floor_division_signed_zero(self):
+ # Check that the sign bit is correctly set when dividing positive and
+ # negative zero by one.
+ x = np.zeros(10)
+ assert_equal(np.signbit(x//1), 0)
+ assert_equal(np.signbit((-x)//1), 1)
+
+def floor_divide_and_remainder(x, y):
+ return (np.floor_divide(x, y), np.remainder(x, y))
+
+
+def _signs(dt):
+ if dt in np.typecodes['UnsignedInteger']:
+ return (+1,)
+ else:
+ return (+1, -1)
+
+
+class TestRemainder(object):
+
+ def test_remainder_basic(self):
+ dt = np.typecodes['AllInteger'] + np.typecodes['Float']
+ for op in [floor_divide_and_remainder, np.divmod]:
+ for dt1, dt2 in itertools.product(dt, dt):
+ for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)):
+ fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
+ msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
+ a = np.array(sg1*71, dtype=dt1)
+ b = np.array(sg2*19, dtype=dt2)
+ div, rem = op(a, b)
+ assert_equal(div*b + rem, a, err_msg=msg)
+ if sg2 == -1:
+ assert_(b < rem <= 0, msg)
+ else:
+ assert_(b > rem >= 0, msg)
+
+ def test_float_remainder_exact(self):
+ # test that float results are exact for small integers. This also
+ # holds for the same integers scaled by powers of two.
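+ # (with |a|, |b| <= 127, both floor(a/b) and a % b are small integers,
+ # exactly representable even in float16's 11-bit significand)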
+ nlst = list(range(-127, 0))
+ plst = list(range(1, 128))
+ dividend = nlst + [0] + plst
+ divisor = nlst + plst
+ arg = list(itertools.product(dividend, divisor))
+ tgt = list(divmod(*t) for t in arg)
+
+ a, b = np.array(arg, dtype=int).T
+        # convert the exact integer results from Python to float so that
+        # signed zero can be represented and checked.
+ tgtdiv, tgtrem = np.array(tgt, dtype=float).T
+ tgtdiv = np.where((tgtdiv == 0.0) & ((b < 0) ^ (a < 0)), -0.0, tgtdiv)
+ tgtrem = np.where((tgtrem == 0.0) & (b < 0), -0.0, tgtrem)
+
+ for op in [floor_divide_and_remainder, np.divmod]:
+ for dt in np.typecodes['Float']:
+ msg = 'op: %s, dtype: %s' % (op.__name__, dt)
+ fa = a.astype(dt)
+ fb = b.astype(dt)
+ div, rem = op(fa, fb)
+ assert_equal(div, tgtdiv, err_msg=msg)
+ assert_equal(rem, tgtrem, err_msg=msg)
+
+ def test_float_remainder_roundoff(self):
+ # gh-6127
+ dt = np.typecodes['Float']
+ for op in [floor_divide_and_remainder, np.divmod]:
+ for dt1, dt2 in itertools.product(dt, dt):
+ for sg1, sg2 in itertools.product((+1, -1), (+1, -1)):
+ fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
+ msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
+ a = np.array(sg1*78*6e-8, dtype=dt1)
+ b = np.array(sg2*6e-8, dtype=dt2)
+ div, rem = op(a, b)
+ # Equal assertion should hold when fmod is used
+ assert_equal(div*b + rem, a, err_msg=msg)
+ if sg2 == -1:
+ assert_(b < rem <= 0, msg)
+ else:
+ assert_(b > rem >= 0, msg)
+
+ def test_float_remainder_corner_cases(self):
+ # Check remainder magnitude.
+ for dt in np.typecodes['Float']:
+ b = np.array(1.0, dtype=dt)
+ a = np.nextafter(np.array(0.0, dtype=dt), -b)
+ rem = np.remainder(a, b)
+ assert_(rem <= b, 'dt: %s' % dt)
+ rem = np.remainder(-a, -b)
+ assert_(rem >= -b, 'dt: %s' % dt)
+
+ # Check nans, inf
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning, "invalid value encountered in remainder")
+ for dt in np.typecodes['Float']:
+ fone = np.array(1.0, dtype=dt)
+ fzer = np.array(0.0, dtype=dt)
+ finf = np.array(np.inf, dtype=dt)
+ fnan = np.array(np.nan, dtype=dt)
+ rem = np.remainder(fone, fzer)
+ assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
+ # MSVC 2008 returns NaN here, so disable the check.
+ #rem = np.remainder(fone, finf)
+ #assert_(rem == fone, 'dt: %s, rem: %s' % (dt, rem))
+ rem = np.remainder(fone, fnan)
+ assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
+ rem = np.remainder(finf, fone)
+ assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
+
+
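+# Editor's sketch, not from the suite above (helper name is ours), of the
+# convention TestRemainder checks: np.divmod(a, b) agrees with
+# (np.floor_divide(a, b), np.remainder(a, b)), and the remainder is zero
+# or shares the sign of the divisor.
+def _sketch_divmod_convention():
+    import numpy as np
+    a, b = np.float64(71.0), np.float64(-19.0)
+    div, rem = np.divmod(a, b)
+    assert div == np.floor_divide(a, b) and rem == np.remainder(a, b)
+    assert b < rem <= 0        # remainder follows the divisor's sign
+    assert div * b + rem == a  # exact for small integer-valued floats
+
+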
+class TestCbrt(object):
+ def test_cbrt_scalar(self):
+ assert_almost_equal((np.cbrt(np.float32(-2.5)**3)), -2.5)
+
+ def test_cbrt(self):
+ x = np.array([1., 2., -3., np.inf, -np.inf])
+ assert_almost_equal(np.cbrt(x**3), x)
+
+ assert_(np.isnan(np.cbrt(np.nan)))
+ assert_equal(np.cbrt(np.inf), np.inf)
+ assert_equal(np.cbrt(-np.inf), -np.inf)
+
+
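+# Editor's sketch (ours, not the suite's) of why cbrt gets a dedicated
+# ufunc: the generic power path has no real result for a negative float
+# raised to 1/3 and returns nan, while np.cbrt covers the whole real line.
+def _sketch_cbrt_vs_power():
+    import numpy as np
+    with np.errstate(invalid='ignore'):
+        assert np.isnan(np.float64(-8.0) ** (1.0 / 3.0))
+    assert np.isclose(np.cbrt(-8.0), -2.0)
+
+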
+class TestPower(object):
+ def test_power_float(self):
+ x = np.array([1., 2., 3.])
+ assert_equal(x**0, [1., 1., 1.])
+ assert_equal(x**1, x)
+ assert_equal(x**2, [1., 4., 9.])
+ y = x.copy()
+ y **= 2
+ assert_equal(y, [1., 4., 9.])
+ assert_almost_equal(x**(-1), [1., 0.5, 1./3])
+ assert_almost_equal(x**(0.5), [1., ncu.sqrt(2), ncu.sqrt(3)])
+
+ for out, inp, msg in _gen_alignment_data(dtype=np.float32,
+ type='unary',
+ max_size=11):
+ exp = [ncu.sqrt(i) for i in inp]
+ assert_almost_equal(inp**(0.5), exp, err_msg=msg)
+ np.sqrt(inp, out=out)
+ assert_equal(out, exp, err_msg=msg)
+
+ for out, inp, msg in _gen_alignment_data(dtype=np.float64,
+ type='unary',
+ max_size=7):
+ exp = [ncu.sqrt(i) for i in inp]
+ assert_almost_equal(inp**(0.5), exp, err_msg=msg)
+ np.sqrt(inp, out=out)
+ assert_equal(out, exp, err_msg=msg)
+
+ def test_power_complex(self):
+ x = np.array([1+2j, 2+3j, 3+4j])
+ assert_equal(x**0, [1., 1., 1.])
+ assert_equal(x**1, x)
+ assert_almost_equal(x**2, [-3+4j, -5+12j, -7+24j])
+ assert_almost_equal(x**3, [(1+2j)**3, (2+3j)**3, (3+4j)**3])
+ assert_almost_equal(x**4, [(1+2j)**4, (2+3j)**4, (3+4j)**4])
+ assert_almost_equal(x**(-1), [1/(1+2j), 1/(2+3j), 1/(3+4j)])
+ assert_almost_equal(x**(-2), [1/(1+2j)**2, 1/(2+3j)**2, 1/(3+4j)**2])
+ assert_almost_equal(x**(-3), [(-11+2j)/125, (-46-9j)/2197,
+ (-117-44j)/15625])
+ assert_almost_equal(x**(0.5), [ncu.sqrt(1+2j), ncu.sqrt(2+3j),
+ ncu.sqrt(3+4j)])
+ norm = 1./((x**14)[0])
+ assert_almost_equal(x**14 * norm,
+ [i * norm for i in [-76443+16124j, 23161315+58317492j,
+ 5583548873 + 2465133864j]])
+
+ # Ticket #836
+ def assert_complex_equal(x, y):
+ assert_array_equal(x.real, y.real)
+ assert_array_equal(x.imag, y.imag)
+
+ for z in [complex(0, np.inf), complex(1, np.inf)]:
+ z = np.array([z], dtype=np.complex_)
+ with np.errstate(invalid="ignore"):
+ assert_complex_equal(z**1, z)
+ assert_complex_equal(z**2, z*z)
+ assert_complex_equal(z**3, z*z*z)
+
+ def test_power_zero(self):
+ # ticket #1271
+ zero = np.array([0j])
+ one = np.array([1+0j])
+ cnan = np.array([complex(np.nan, np.nan)])
+ # FIXME cinf not tested.
+ #cinf = np.array([complex(np.inf, 0)])
+
+ def assert_complex_equal(x, y):
+ x, y = np.asarray(x), np.asarray(y)
+ assert_array_equal(x.real, y.real)
+ assert_array_equal(x.imag, y.imag)
+
+ # positive powers
+ for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]:
+ assert_complex_equal(np.power(zero, p), zero)
+
+ # zero power
+ assert_complex_equal(np.power(zero, 0), one)
+ with np.errstate(invalid="ignore"):
+ assert_complex_equal(np.power(zero, 0+1j), cnan)
+
+ # negative power
+ for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]:
+ assert_complex_equal(np.power(zero, -p), cnan)
+ assert_complex_equal(np.power(zero, -1+0.2j), cnan)
+
+ def test_fast_power(self):
+ x = np.array([1, 2, 3], np.int16)
+ res = x**2.0
+ assert_((x**2.00001).dtype is res.dtype)
+ assert_array_equal(res, [1, 4, 9])
+ # check the inplace operation on the casted copy doesn't mess with x
+ assert_(not np.may_share_memory(res, x))
+ assert_array_equal(x, [1, 2, 3])
+
+        # Check that the fast path ignores 1-element arrays that are not 0-d
+ res = x ** np.array([[[2]]])
+ assert_equal(res.shape, (1, 1, 3))
+
+ def test_integer_power(self):
+ a = np.array([15, 15], 'i8')
+ b = np.power(a, a)
+ assert_equal(b, [437893890380859375, 437893890380859375])
+
+ def test_integer_power_with_integer_zero_exponent(self):
+ dtypes = np.typecodes['Integer']
+ for dt in dtypes:
+ arr = np.arange(-10, 10, dtype=dt)
+ assert_equal(np.power(arr, 0), np.ones_like(arr))
+
+ dtypes = np.typecodes['UnsignedInteger']
+ for dt in dtypes:
+ arr = np.arange(10, dtype=dt)
+ assert_equal(np.power(arr, 0), np.ones_like(arr))
+
+ def test_integer_power_of_1(self):
+ dtypes = np.typecodes['AllInteger']
+ for dt in dtypes:
+ arr = np.arange(10, dtype=dt)
+ assert_equal(np.power(1, arr), np.ones_like(arr))
+
+ def test_integer_power_of_zero(self):
+ dtypes = np.typecodes['AllInteger']
+ for dt in dtypes:
+ arr = np.arange(1, 10, dtype=dt)
+ assert_equal(np.power(0, arr), np.zeros_like(arr))
+
+ def test_integer_to_negative_power(self):
+ dtypes = np.typecodes['Integer']
+ for dt in dtypes:
+ a = np.array([0, 1, 2, 3], dtype=dt)
+ b = np.array([0, 1, 2, -3], dtype=dt)
+ one = np.array(1, dtype=dt)
+ minusone = np.array(-1, dtype=dt)
+ assert_raises(ValueError, np.power, a, b)
+ assert_raises(ValueError, np.power, a, minusone)
+ assert_raises(ValueError, np.power, one, b)
+ assert_raises(ValueError, np.power, one, minusone)
+
+
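+# Editor's sketch of the integer-power behavior TestPower pins down
+# (helper name is ours): exact integer results within the dtype's range,
+# and ValueError for negative integer exponents, which have no integer
+# representation.
+def _sketch_integer_power():
+    import numpy as np
+    a = np.array([2, 3], dtype=np.int64)
+    assert (np.power(a, 10) == [1024, 59049]).all()  # exact integer result
+    try:
+        np.power(a, -1)  # would need a fractional value in an integer dtype
+    except ValueError:
+        pass
+    else:
+        raise AssertionError("negative integer exponent should raise")
+
+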
+class TestFloat_power(object):
+ def test_type_conversion(self):
+ arg_type = '?bhilBHILefdgFDG'
+ res_type = 'ddddddddddddgDDG'
+ for dtin, dtout in zip(arg_type, res_type):
+ msg = "dtin: %s, dtout: %s" % (dtin, dtout)
+ arg = np.ones(1, dtype=dtin)
+ res = np.float_power(arg, arg)
+ assert_(res.dtype.name == np.dtype(dtout).name, msg)
+
+
+class TestLog2(object):
+ def test_log2_values(self):
+ x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
+ y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+ for dt in ['f', 'd', 'g']:
+ xf = np.array(x, dtype=dt)
+ yf = np.array(y, dtype=dt)
+ assert_almost_equal(np.log2(xf), yf)
+
+ def test_log2_ints(self):
+        # a good log2 implementation should return these exactly;
+        # this might fail on an OS with a poor libm
+ for i in range(1, 65):
+ v = np.log2(2.**i)
+ assert_equal(v, float(i), err_msg='at exponent %d' % i)
+
+ def test_log2_special(self):
+ assert_equal(np.log2(1.), 0.)
+ assert_equal(np.log2(np.inf), np.inf)
+ assert_(np.isnan(np.log2(np.nan)))
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', RuntimeWarning)
+ assert_(np.isnan(np.log2(-1.)))
+ assert_(np.isnan(np.log2(-np.inf)))
+ assert_equal(np.log2(0.), -np.inf)
+ assert_(w[0].category is RuntimeWarning)
+ assert_(w[1].category is RuntimeWarning)
+ assert_(w[2].category is RuntimeWarning)
+
+
+class TestExp2(object):
+ def test_exp2_values(self):
+ x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
+ y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+ for dt in ['f', 'd', 'g']:
+ xf = np.array(x, dtype=dt)
+ yf = np.array(y, dtype=dt)
+ assert_almost_equal(np.exp2(yf), xf)
+
+
+class TestLogAddExp2(_FilterInvalids):
+ # Need test for intermediate precisions
+ def test_logaddexp2_values(self):
+ x = [1, 2, 3, 4, 5]
+ y = [5, 4, 3, 2, 1]
+ z = [6, 6, 6, 6, 6]
+ for dt, dec_ in zip(['f', 'd', 'g'], [6, 15, 15]):
+ xf = np.log2(np.array(x, dtype=dt))
+ yf = np.log2(np.array(y, dtype=dt))
+ zf = np.log2(np.array(z, dtype=dt))
+ assert_almost_equal(np.logaddexp2(xf, yf), zf, decimal=dec_)
+
+ def test_logaddexp2_range(self):
+ x = [1000000, -1000000, 1000200, -1000200]
+ y = [1000200, -1000200, 1000000, -1000000]
+ z = [1000200, -1000000, 1000200, -1000000]
+ for dt in ['f', 'd', 'g']:
+ logxf = np.array(x, dtype=dt)
+ logyf = np.array(y, dtype=dt)
+ logzf = np.array(z, dtype=dt)
+ assert_almost_equal(np.logaddexp2(logxf, logyf), logzf)
+
+ def test_inf(self):
+ inf = np.inf
+ x = [inf, -inf, inf, -inf, inf, 1, -inf, 1]
+ y = [inf, inf, -inf, -inf, 1, inf, 1, -inf]
+ z = [inf, inf, inf, -inf, inf, inf, 1, 1]
+ with np.errstate(invalid='raise'):
+ for dt in ['f', 'd', 'g']:
+ logxf = np.array(x, dtype=dt)
+ logyf = np.array(y, dtype=dt)
+ logzf = np.array(z, dtype=dt)
+ assert_equal(np.logaddexp2(logxf, logyf), logzf)
+
+ def test_nan(self):
+ assert_(np.isnan(np.logaddexp2(np.nan, np.inf)))
+ assert_(np.isnan(np.logaddexp2(np.inf, np.nan)))
+ assert_(np.isnan(np.logaddexp2(np.nan, 0)))
+ assert_(np.isnan(np.logaddexp2(0, np.nan)))
+ assert_(np.isnan(np.logaddexp2(np.nan, np.nan)))
+
+
+class TestLog(object):
+ def test_log_values(self):
+ x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
+ y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+ for dt in ['f', 'd', 'g']:
+ log2_ = 0.69314718055994530943
+ xf = np.array(x, dtype=dt)
+ yf = np.array(y, dtype=dt)*log2_
+ assert_almost_equal(np.log(xf), yf)
+
+
+class TestExp(object):
+ def test_exp_values(self):
+ x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
+ y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+ for dt in ['f', 'd', 'g']:
+ log2_ = 0.69314718055994530943
+ xf = np.array(x, dtype=dt)
+ yf = np.array(y, dtype=dt)*log2_
+ assert_almost_equal(np.exp(yf), xf)
+
+
+class TestLogAddExp(_FilterInvalids):
+ def test_logaddexp_values(self):
+ x = [1, 2, 3, 4, 5]
+ y = [5, 4, 3, 2, 1]
+ z = [6, 6, 6, 6, 6]
+ for dt, dec_ in zip(['f', 'd', 'g'], [6, 15, 15]):
+ xf = np.log(np.array(x, dtype=dt))
+ yf = np.log(np.array(y, dtype=dt))
+ zf = np.log(np.array(z, dtype=dt))
+ assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec_)
+
+ def test_logaddexp_range(self):
+ x = [1000000, -1000000, 1000200, -1000200]
+ y = [1000200, -1000200, 1000000, -1000000]
+ z = [1000200, -1000000, 1000200, -1000000]
+ for dt in ['f', 'd', 'g']:
+ logxf = np.array(x, dtype=dt)
+ logyf = np.array(y, dtype=dt)
+ logzf = np.array(z, dtype=dt)
+ assert_almost_equal(np.logaddexp(logxf, logyf), logzf)
+
+ def test_inf(self):
+ inf = np.inf
+ x = [inf, -inf, inf, -inf, inf, 1, -inf, 1]
+ y = [inf, inf, -inf, -inf, 1, inf, 1, -inf]
+ z = [inf, inf, inf, -inf, inf, inf, 1, 1]
+ with np.errstate(invalid='raise'):
+ for dt in ['f', 'd', 'g']:
+ logxf = np.array(x, dtype=dt)
+ logyf = np.array(y, dtype=dt)
+ logzf = np.array(z, dtype=dt)
+ assert_equal(np.logaddexp(logxf, logyf), logzf)
+
+ def test_nan(self):
+ assert_(np.isnan(np.logaddexp(np.nan, np.inf)))
+ assert_(np.isnan(np.logaddexp(np.inf, np.nan)))
+ assert_(np.isnan(np.logaddexp(np.nan, 0)))
+ assert_(np.isnan(np.logaddexp(0, np.nan)))
+ assert_(np.isnan(np.logaddexp(np.nan, np.nan)))
+
+ def test_reduce(self):
+ assert_equal(np.logaddexp.identity, -np.inf)
+ assert_equal(np.logaddexp.reduce([]), -np.inf)
+
+
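+# Editor's sketch (ours) of why logaddexp/logaddexp2 exist: they evaluate
+# log(exp(x) + exp(y)) without overflowing the intermediate exponentials,
+# which is exactly what the *_range tests above probe with huge arguments.
+def _sketch_logaddexp_stability():
+    import numpy as np
+    x = y = 1000.0
+    with np.errstate(over='ignore'):
+        assert np.isinf(np.log(np.exp(x) + np.exp(y)))  # naive form overflows
+    assert np.isclose(np.logaddexp(x, y), 1000.0 + np.log(2.0))
+
+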
+class TestLog1p(object):
+ def test_log1p(self):
+ assert_almost_equal(ncu.log1p(0.2), ncu.log(1.2))
+ assert_almost_equal(ncu.log1p(1e-6), ncu.log(1+1e-6))
+
+ def test_special(self):
+ with np.errstate(invalid="ignore", divide="ignore"):
+ assert_equal(ncu.log1p(np.nan), np.nan)
+ assert_equal(ncu.log1p(np.inf), np.inf)
+ assert_equal(ncu.log1p(-1.), -np.inf)
+ assert_equal(ncu.log1p(-2.), np.nan)
+ assert_equal(ncu.log1p(-np.inf), np.nan)
+
+
+class TestExpm1(object):
+ def test_expm1(self):
+ assert_almost_equal(ncu.expm1(0.2), ncu.exp(0.2)-1)
+ assert_almost_equal(ncu.expm1(1e-6), ncu.exp(1e-6)-1)
+
+ def test_special(self):
+ assert_equal(ncu.expm1(np.inf), np.inf)
+ assert_equal(ncu.expm1(0.), 0.)
+ assert_equal(ncu.expm1(-0.), -0.)
+ assert_equal(ncu.expm1(np.inf), np.inf)
+ assert_equal(ncu.expm1(-np.inf), -1.)
+
+
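+# Editor's sketch (ours) of the motivation for log1p/expm1 tested above:
+# near zero, 1 + x rounds away the significant digits, so log(1 + x)
+# loses the precision that log1p retains; expm1 is the analogous inverse.
+def _sketch_log1p_precision():
+    import numpy as np
+    x = 1e-16
+    assert np.log(1.0 + x) == 0.0      # 1 + 1e-16 rounds to exactly 1.0
+    assert np.isclose(np.log1p(x), x)  # log1p keeps the leading term
+    assert np.isclose(np.expm1(x), x)
+
+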
+class TestHypot(object):
+ def test_simple(self):
+ assert_almost_equal(ncu.hypot(1, 1), ncu.sqrt(2))
+ assert_almost_equal(ncu.hypot(0, 0), 0)
+
+ def test_reduce(self):
+ assert_almost_equal(ncu.hypot.reduce([3.0, 4.0]), 5.0)
+ assert_almost_equal(ncu.hypot.reduce([3.0, 4.0, 0]), 5.0)
+ assert_almost_equal(ncu.hypot.reduce([9.0, 12.0, 20.0]), 25.0)
+ assert_equal(ncu.hypot.reduce([]), 0.0)
+
+
+def assert_hypot_isnan(x, y):
+ with np.errstate(invalid='ignore'):
+ assert_(np.isnan(ncu.hypot(x, y)),
+ "hypot(%s, %s) is %s, not nan" % (x, y, ncu.hypot(x, y)))
+
+
+def assert_hypot_isinf(x, y):
+ with np.errstate(invalid='ignore'):
+ assert_(np.isinf(ncu.hypot(x, y)),
+ "hypot(%s, %s) is %s, not inf" % (x, y, ncu.hypot(x, y)))
+
+
+class TestHypotSpecialValues(object):
+ def test_nan_outputs(self):
+ assert_hypot_isnan(np.nan, np.nan)
+ assert_hypot_isnan(np.nan, 1)
+
+ def test_nan_outputs2(self):
+ assert_hypot_isinf(np.nan, np.inf)
+ assert_hypot_isinf(np.inf, np.nan)
+ assert_hypot_isinf(np.inf, 0)
+ assert_hypot_isinf(0, np.inf)
+ assert_hypot_isinf(np.inf, np.inf)
+ assert_hypot_isinf(np.inf, 23.0)
+
+ def test_no_fpe(self):
+ assert_no_warnings(ncu.hypot, np.inf, 0)
+
+
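+# Editor's sketch (ours) of the property behind TestHypot: hypot(x, y)
+# computes sqrt(x*x + y*y) without letting the intermediate squares
+# overflow, so it stays finite where the naive formula does not.
+def _sketch_hypot_no_overflow():
+    import numpy as np
+    x = np.float64(1e200)
+    with np.errstate(over='ignore'):
+        assert np.isinf(np.sqrt(x*x + x*x))  # squares overflow to inf
+    assert np.isclose(np.hypot(x, x), x * np.sqrt(2.0))
+
+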
+def assert_arctan2_isnan(x, y):
+ assert_(np.isnan(ncu.arctan2(x, y)), "arctan(%s, %s) is %s, not nan" % (x, y, ncu.arctan2(x, y)))
+
+
+def assert_arctan2_ispinf(x, y):
+ assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) > 0), "arctan(%s, %s) is %s, not +inf" % (x, y, ncu.arctan2(x, y)))
+
+
+def assert_arctan2_isninf(x, y):
+ assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) < 0), "arctan(%s, %s) is %s, not -inf" % (x, y, ncu.arctan2(x, y)))
+
+
+def assert_arctan2_ispzero(x, y):
+ assert_((ncu.arctan2(x, y) == 0 and not np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not +0" % (x, y, ncu.arctan2(x, y)))
+
+
+def assert_arctan2_isnzero(x, y):
+ assert_((ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not -0" % (x, y, ncu.arctan2(x, y)))
+
+
+class TestArctan2SpecialValues(object):
+ def test_one_one(self):
+ # atan2(1, 1) returns pi/4.
+ assert_almost_equal(ncu.arctan2(1, 1), 0.25 * np.pi)
+ assert_almost_equal(ncu.arctan2(-1, 1), -0.25 * np.pi)
+ assert_almost_equal(ncu.arctan2(1, -1), 0.75 * np.pi)
+
+ def test_zero_nzero(self):
+ # atan2(+-0, -0) returns +-pi.
+ assert_almost_equal(ncu.arctan2(np.PZERO, np.NZERO), np.pi)
+ assert_almost_equal(ncu.arctan2(np.NZERO, np.NZERO), -np.pi)
+
+ def test_zero_pzero(self):
+ # atan2(+-0, +0) returns +-0.
+ assert_arctan2_ispzero(np.PZERO, np.PZERO)
+ assert_arctan2_isnzero(np.NZERO, np.PZERO)
+
+ def test_zero_negative(self):
+ # atan2(+-0, x) returns +-pi for x < 0.
+ assert_almost_equal(ncu.arctan2(np.PZERO, -1), np.pi)
+ assert_almost_equal(ncu.arctan2(np.NZERO, -1), -np.pi)
+
+ def test_zero_positive(self):
+ # atan2(+-0, x) returns +-0 for x > 0.
+ assert_arctan2_ispzero(np.PZERO, 1)
+ assert_arctan2_isnzero(np.NZERO, 1)
+
+ def test_positive_zero(self):
+ # atan2(y, +-0) returns +pi/2 for y > 0.
+ assert_almost_equal(ncu.arctan2(1, np.PZERO), 0.5 * np.pi)
+ assert_almost_equal(ncu.arctan2(1, np.NZERO), 0.5 * np.pi)
+
+ def test_negative_zero(self):
+ # atan2(y, +-0) returns -pi/2 for y < 0.
+ assert_almost_equal(ncu.arctan2(-1, np.PZERO), -0.5 * np.pi)
+ assert_almost_equal(ncu.arctan2(-1, np.NZERO), -0.5 * np.pi)
+
+ def test_any_ninf(self):
+ # atan2(+-y, -infinity) returns +-pi for finite y > 0.
+ assert_almost_equal(ncu.arctan2(1, np.NINF), np.pi)
+ assert_almost_equal(ncu.arctan2(-1, np.NINF), -np.pi)
+
+ def test_any_pinf(self):
+ # atan2(+-y, +infinity) returns +-0 for finite y > 0.
+ assert_arctan2_ispzero(1, np.inf)
+ assert_arctan2_isnzero(-1, np.inf)
+
+ def test_inf_any(self):
+ # atan2(+-infinity, x) returns +-pi/2 for finite x.
+ assert_almost_equal(ncu.arctan2( np.inf, 1), 0.5 * np.pi)
+ assert_almost_equal(ncu.arctan2(-np.inf, 1), -0.5 * np.pi)
+
+ def test_inf_ninf(self):
+ # atan2(+-infinity, -infinity) returns +-3*pi/4.
+ assert_almost_equal(ncu.arctan2( np.inf, -np.inf), 0.75 * np.pi)
+ assert_almost_equal(ncu.arctan2(-np.inf, -np.inf), -0.75 * np.pi)
+
+ def test_inf_pinf(self):
+ # atan2(+-infinity, +infinity) returns +-pi/4.
+ assert_almost_equal(ncu.arctan2( np.inf, np.inf), 0.25 * np.pi)
+ assert_almost_equal(ncu.arctan2(-np.inf, np.inf), -0.25 * np.pi)
+
+ def test_nan_any(self):
+ # atan2(nan, x) returns nan for any x, including inf
+ assert_arctan2_isnan(np.nan, np.inf)
+ assert_arctan2_isnan(np.inf, np.nan)
+ assert_arctan2_isnan(np.nan, np.nan)
+
+
+class TestLdexp(object):
+ def _check_ldexp(self, tp):
+ assert_almost_equal(ncu.ldexp(np.array(2., np.float32),
+ np.array(3, tp)), 16.)
+ assert_almost_equal(ncu.ldexp(np.array(2., np.float64),
+ np.array(3, tp)), 16.)
+ assert_almost_equal(ncu.ldexp(np.array(2., np.longdouble),
+ np.array(3, tp)), 16.)
+
+ def test_ldexp(self):
+ # The default Python int type should work
+ assert_almost_equal(ncu.ldexp(2., 3), 16.)
+ # The following int types should all be accepted
+ self._check_ldexp(np.int8)
+ self._check_ldexp(np.int16)
+ self._check_ldexp(np.int32)
+ self._check_ldexp('i')
+ self._check_ldexp('l')
+
+ def test_ldexp_overflow(self):
+ # silence warning emitted on overflow
+ with np.errstate(over="ignore"):
+ imax = np.iinfo(np.dtype('l')).max
+ imin = np.iinfo(np.dtype('l')).min
+ assert_equal(ncu.ldexp(2., imax), np.inf)
+ assert_equal(ncu.ldexp(2., imin), 0)
+
+
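+# Editor's sketch (ours) of the identity TestLdexp checks: ldexp(m, e)
+# scales m by 2**e exactly by adjusting the floating-point exponent,
+# saturating to inf on overflow.
+def _sketch_ldexp():
+    import numpy as np
+    assert np.ldexp(2.0, 3) == 16.0     # 2 * 2**3, exact
+    assert np.ldexp(0.75, 10) == 768.0  # no rounding involved
+    with np.errstate(over='ignore'):
+        assert np.isinf(np.ldexp(2.0, 100000))
+
+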
+class TestMaximum(_FilterInvalids):
+ def test_reduce(self):
+ dflt = np.typecodes['AllFloat']
+ dint = np.typecodes['AllInteger']
+ seq1 = np.arange(11)
+ seq2 = seq1[::-1]
+ func = np.maximum.reduce
+ for dt in dint:
+ tmp1 = seq1.astype(dt)
+ tmp2 = seq2.astype(dt)
+ assert_equal(func(tmp1), 10)
+ assert_equal(func(tmp2), 10)
+ for dt in dflt:
+ tmp1 = seq1.astype(dt)
+ tmp2 = seq2.astype(dt)
+ assert_equal(func(tmp1), 10)
+ assert_equal(func(tmp2), 10)
+ tmp1[::2] = np.nan
+ tmp2[::2] = np.nan
+ assert_equal(func(tmp1), np.nan)
+ assert_equal(func(tmp2), np.nan)
+
+ def test_reduce_complex(self):
+ assert_equal(np.maximum.reduce([1, 2j]), 1)
+ assert_equal(np.maximum.reduce([1+3j, 2j]), 1+3j)
+
+ def test_float_nans(self):
+ nan = np.nan
+ arg1 = np.array([0, nan, nan])
+ arg2 = np.array([nan, 0, nan])
+ out = np.array([nan, nan, nan])
+ assert_equal(np.maximum(arg1, arg2), out)
+
+ def test_object_nans(self):
+ # Multiple checks to give this a chance to
+ # fail if cmp is used instead of rich compare.
+ # Failure cannot be guaranteed.
+ for i in range(1):
+ x = np.array(float('nan'), object)
+ y = 1.0
+ z = np.array(float('nan'), object)
+ assert_(np.maximum(x, y) == 1.0)
+ assert_(np.maximum(z, y) == 1.0)
+
+ def test_complex_nans(self):
+ nan = np.nan
+ for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
+ arg1 = np.array([0, cnan, cnan], dtype=complex)
+ arg2 = np.array([cnan, 0, cnan], dtype=complex)
+ out = np.array([nan, nan, nan], dtype=complex)
+ assert_equal(np.maximum(arg1, arg2), out)
+
+ def test_object_array(self):
+ arg1 = np.arange(5, dtype=object)
+ arg2 = arg1 + 1
+ assert_equal(np.maximum(arg1, arg2), arg2)
+
+
+class TestMinimum(_FilterInvalids):
+ def test_reduce(self):
+ dflt = np.typecodes['AllFloat']
+ dint = np.typecodes['AllInteger']
+ seq1 = np.arange(11)
+ seq2 = seq1[::-1]
+ func = np.minimum.reduce
+ for dt in dint:
+ tmp1 = seq1.astype(dt)
+ tmp2 = seq2.astype(dt)
+ assert_equal(func(tmp1), 0)
+ assert_equal(func(tmp2), 0)
+ for dt in dflt:
+ tmp1 = seq1.astype(dt)
+ tmp2 = seq2.astype(dt)
+ assert_equal(func(tmp1), 0)
+ assert_equal(func(tmp2), 0)
+ tmp1[::2] = np.nan
+ tmp2[::2] = np.nan
+ assert_equal(func(tmp1), np.nan)
+ assert_equal(func(tmp2), np.nan)
+
+ def test_reduce_complex(self):
+ assert_equal(np.minimum.reduce([1, 2j]), 2j)
+ assert_equal(np.minimum.reduce([1+3j, 2j]), 2j)
+
+ def test_float_nans(self):
+ nan = np.nan
+ arg1 = np.array([0, nan, nan])
+ arg2 = np.array([nan, 0, nan])
+ out = np.array([nan, nan, nan])
+ assert_equal(np.minimum(arg1, arg2), out)
+
+ def test_object_nans(self):
+ # Multiple checks to give this a chance to
+ # fail if cmp is used instead of rich compare.
+ # Failure cannot be guaranteed.
+ for i in range(1):
+ x = np.array(float('nan'), object)
+ y = 1.0
+ z = np.array(float('nan'), object)
+ assert_(np.minimum(x, y) == 1.0)
+ assert_(np.minimum(z, y) == 1.0)
+
+ def test_complex_nans(self):
+ nan = np.nan
+ for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
+ arg1 = np.array([0, cnan, cnan], dtype=complex)
+ arg2 = np.array([cnan, 0, cnan], dtype=complex)
+ out = np.array([nan, nan, nan], dtype=complex)
+ assert_equal(np.minimum(arg1, arg2), out)
+
+ def test_object_array(self):
+ arg1 = np.arange(5, dtype=object)
+ arg2 = arg1 + 1
+ assert_equal(np.minimum(arg1, arg2), arg1)
+
+
+class TestFmax(_FilterInvalids):
+ def test_reduce(self):
+ dflt = np.typecodes['AllFloat']
+ dint = np.typecodes['AllInteger']
+ seq1 = np.arange(11)
+ seq2 = seq1[::-1]
+ func = np.fmax.reduce
+ for dt in dint:
+ tmp1 = seq1.astype(dt)
+ tmp2 = seq2.astype(dt)
+ assert_equal(func(tmp1), 10)
+ assert_equal(func(tmp2), 10)
+ for dt in dflt:
+ tmp1 = seq1.astype(dt)
+ tmp2 = seq2.astype(dt)
+ assert_equal(func(tmp1), 10)
+ assert_equal(func(tmp2), 10)
+ tmp1[::2] = np.nan
+ tmp2[::2] = np.nan
+ assert_equal(func(tmp1), 9)
+ assert_equal(func(tmp2), 9)
+
+ def test_reduce_complex(self):
+ assert_equal(np.fmax.reduce([1, 2j]), 1)
+ assert_equal(np.fmax.reduce([1+3j, 2j]), 1+3j)
+
+ def test_float_nans(self):
+ nan = np.nan
+ arg1 = np.array([0, nan, nan])
+ arg2 = np.array([nan, 0, nan])
+ out = np.array([0, 0, nan])
+ assert_equal(np.fmax(arg1, arg2), out)
+
+ def test_complex_nans(self):
+ nan = np.nan
+ for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
+ arg1 = np.array([0, cnan, cnan], dtype=complex)
+ arg2 = np.array([cnan, 0, cnan], dtype=complex)
+ out = np.array([0, 0, nan], dtype=complex)
+ assert_equal(np.fmax(arg1, arg2), out)
+
+
+class TestFmin(_FilterInvalids):
+ def test_reduce(self):
+ dflt = np.typecodes['AllFloat']
+ dint = np.typecodes['AllInteger']
+ seq1 = np.arange(11)
+ seq2 = seq1[::-1]
+ func = np.fmin.reduce
+ for dt in dint:
+ tmp1 = seq1.astype(dt)
+ tmp2 = seq2.astype(dt)
+ assert_equal(func(tmp1), 0)
+ assert_equal(func(tmp2), 0)
+ for dt in dflt:
+ tmp1 = seq1.astype(dt)
+ tmp2 = seq2.astype(dt)
+ assert_equal(func(tmp1), 0)
+ assert_equal(func(tmp2), 0)
+ tmp1[::2] = np.nan
+ tmp2[::2] = np.nan
+ assert_equal(func(tmp1), 1)
+ assert_equal(func(tmp2), 1)
+
+ def test_reduce_complex(self):
+ assert_equal(np.fmin.reduce([1, 2j]), 2j)
+ assert_equal(np.fmin.reduce([1+3j, 2j]), 2j)
+
+ def test_float_nans(self):
+ nan = np.nan
+ arg1 = np.array([0, nan, nan])
+ arg2 = np.array([nan, 0, nan])
+ out = np.array([0, 0, nan])
+ assert_equal(np.fmin(arg1, arg2), out)
+
+ def test_complex_nans(self):
+ nan = np.nan
+ for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
+ arg1 = np.array([0, cnan, cnan], dtype=complex)
+ arg2 = np.array([cnan, 0, cnan], dtype=complex)
+ out = np.array([0, 0, nan], dtype=complex)
+ assert_equal(np.fmin(arg1, arg2), out)
+
+
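+# Editor's sketch (ours) contrasting the two NaN policies pinned down by
+# the four classes above: maximum/minimum propagate NaNs, while fmax/fmin
+# drop them whenever the other operand is a valid number.
+def _sketch_nan_policies():
+    import numpy as np
+    with np.errstate(invalid='ignore'):
+        a = np.array([1.0, np.nan])
+        b = np.array([np.nan, 2.0])
+        assert np.isnan(np.maximum(a, b)).all()     # NaN wins elementwise
+        assert (np.fmax(a, b) == [1.0, 2.0]).all()  # NaN ignored if possible
+        assert np.isnan(np.fmax(np.nan, np.nan))    # two NaNs stay NaN
+
+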
+class TestBool(object):
+ def test_exceptions(self):
+ a = np.ones(1, dtype=np.bool_)
+ assert_raises(TypeError, np.negative, a)
+ assert_raises(TypeError, np.positive, a)
+ assert_raises(TypeError, np.subtract, a, a)
+
+ def test_truth_table_logical(self):
+        # 2, 3 and 4 serve as true values
+ input1 = [0, 0, 3, 2]
+ input2 = [0, 4, 0, 2]
+
+ typecodes = (np.typecodes['AllFloat']
+ + np.typecodes['AllInteger']
+ + '?') # boolean
+ for dtype in map(np.dtype, typecodes):
+ arg1 = np.asarray(input1, dtype=dtype)
+ arg2 = np.asarray(input2, dtype=dtype)
+
+ # OR
+ out = [False, True, True, True]
+ for func in (np.logical_or, np.maximum):
+ assert_equal(func(arg1, arg2).astype(bool), out)
+ # AND
+ out = [False, False, False, True]
+ for func in (np.logical_and, np.minimum):
+ assert_equal(func(arg1, arg2).astype(bool), out)
+ # XOR
+ out = [False, True, True, False]
+ for func in (np.logical_xor, np.not_equal):
+ assert_equal(func(arg1, arg2).astype(bool), out)
+
+ def test_truth_table_bitwise(self):
+ arg1 = [False, False, True, True]
+ arg2 = [False, True, False, True]
+
+ out = [False, True, True, True]
+ assert_equal(np.bitwise_or(arg1, arg2), out)
+
+ out = [False, False, False, True]
+ assert_equal(np.bitwise_and(arg1, arg2), out)
+
+ out = [False, True, True, False]
+ assert_equal(np.bitwise_xor(arg1, arg2), out)
+
+ def test_reduce(self):
+ none = np.array([0, 0, 0, 0], bool)
+ some = np.array([1, 0, 1, 1], bool)
+ every = np.array([1, 1, 1, 1], bool)
+ empty = np.array([], bool)
+
+ arrs = [none, some, every, empty]
+
+ for arr in arrs:
+ assert_equal(np.logical_and.reduce(arr), all(arr))
+
+ for arr in arrs:
+ assert_equal(np.logical_or.reduce(arr), any(arr))
+
+ for arr in arrs:
+ assert_equal(np.logical_xor.reduce(arr), arr.sum() % 2 == 1)
+
+
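+# Editor's sketch (ours) of the distinction the truth-table tests above
+# draw: logical_* treat any nonzero value as True, whereas bitwise_*
+# operate on the underlying bit patterns; on bools the families coincide.
+def _sketch_logical_vs_bitwise():
+    import numpy as np
+    a = np.array([0, 2])
+    b = np.array([3, 2])
+    assert (np.logical_and(a, b) == [False, True]).all()  # truthiness only
+    assert (np.bitwise_and(a, b) == [0, 2]).all()         # 0b10 & 0b10 == 0b10
+
+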
+class TestBitwiseUFuncs(object):
+
+ bitwise_types = [np.dtype(c) for c in '?' + 'bBhHiIlLqQ' + 'O']
+
+ def test_values(self):
+ for dt in self.bitwise_types:
+ zeros = np.array([0], dtype=dt)
+ ones = np.array([-1], dtype=dt)
+ msg = "dt = '%s'" % dt.char
+
+ assert_equal(np.bitwise_not(zeros), ones, err_msg=msg)
+ assert_equal(np.bitwise_not(ones), zeros, err_msg=msg)
+
+ assert_equal(np.bitwise_or(zeros, zeros), zeros, err_msg=msg)
+ assert_equal(np.bitwise_or(zeros, ones), ones, err_msg=msg)
+ assert_equal(np.bitwise_or(ones, zeros), ones, err_msg=msg)
+ assert_equal(np.bitwise_or(ones, ones), ones, err_msg=msg)
+
+ assert_equal(np.bitwise_xor(zeros, zeros), zeros, err_msg=msg)
+ assert_equal(np.bitwise_xor(zeros, ones), ones, err_msg=msg)
+ assert_equal(np.bitwise_xor(ones, zeros), ones, err_msg=msg)
+ assert_equal(np.bitwise_xor(ones, ones), zeros, err_msg=msg)
+
+ assert_equal(np.bitwise_and(zeros, zeros), zeros, err_msg=msg)
+ assert_equal(np.bitwise_and(zeros, ones), zeros, err_msg=msg)
+ assert_equal(np.bitwise_and(ones, zeros), zeros, err_msg=msg)
+ assert_equal(np.bitwise_and(ones, ones), ones, err_msg=msg)
+
+ def test_types(self):
+ for dt in self.bitwise_types:
+ zeros = np.array([0], dtype=dt)
+ ones = np.array([-1], dtype=dt)
+ msg = "dt = '%s'" % dt.char
+
+ assert_(np.bitwise_not(zeros).dtype == dt, msg)
+ assert_(np.bitwise_or(zeros, zeros).dtype == dt, msg)
+ assert_(np.bitwise_xor(zeros, zeros).dtype == dt, msg)
+ assert_(np.bitwise_and(zeros, zeros).dtype == dt, msg)
+
+ def test_identity(self):
+ assert_(np.bitwise_or.identity == 0, 'bitwise_or')
+ assert_(np.bitwise_xor.identity == 0, 'bitwise_xor')
+ assert_(np.bitwise_and.identity == -1, 'bitwise_and')
+
+ def test_reduction(self):
+ binary_funcs = (np.bitwise_or, np.bitwise_xor, np.bitwise_and)
+
+ for dt in self.bitwise_types:
+ zeros = np.array([0], dtype=dt)
+ ones = np.array([-1], dtype=dt)
+ for f in binary_funcs:
+ msg = "dt: '%s', f: '%s'" % (dt, f)
+ assert_equal(f.reduce(zeros), zeros, err_msg=msg)
+ assert_equal(f.reduce(ones), ones, err_msg=msg)
+
+ # Test empty reduction, no object dtype
+ for dt in self.bitwise_types[:-1]:
+ # No object array types
+ empty = np.array([], dtype=dt)
+ for f in binary_funcs:
+ msg = "dt: '%s', f: '%s'" % (dt, f)
+ tgt = np.array(f.identity, dtype=dt)
+ res = f.reduce(empty)
+ assert_equal(res, tgt, err_msg=msg)
+ assert_(res.dtype == tgt.dtype, msg)
+
+ # Empty object arrays use the identity. Note that the types may
+        # differ: the actual type used is determined by the assign_identity
+ # function and is not the same as the type returned by the identity
+ # method.
+ for f in binary_funcs:
+ msg = "dt: '%s'" % (f,)
+ empty = np.array([], dtype=object)
+ tgt = f.identity
+ res = f.reduce(empty)
+ assert_equal(res, tgt, err_msg=msg)
+
+ # Non-empty object arrays do not use the identity
+ for f in binary_funcs:
+ msg = "dt: '%s'" % (f,)
+ btype = np.array([True], dtype=object)
+ assert_(type(f.reduce(btype)) is bool, msg)
+
+
+class TestInt(object):
+ def test_logical_not(self):
+ x = np.ones(10, dtype=np.int16)
+ o = np.ones(10 * 2, dtype=bool)
+ tgt = o.copy()
+ tgt[::2] = False
+ os = o[::2]
+ assert_array_equal(np.logical_not(x, out=os), False)
+ assert_array_equal(o, tgt)
+
+
+class TestFloatingPoint(object):
+ def test_floating_point(self):
+ assert_equal(ncu.FLOATING_POINT_SUPPORT, 1)
+
+
+class TestDegrees(object):
+ def test_degrees(self):
+ assert_almost_equal(ncu.degrees(np.pi), 180.0)
+ assert_almost_equal(ncu.degrees(-0.5*np.pi), -90.0)
+
+
+class TestRadians(object):
+ def test_radians(self):
+ assert_almost_equal(ncu.radians(180.0), np.pi)
+ assert_almost_equal(ncu.radians(-90.0), -0.5*np.pi)
+
+
+class TestHeaviside(object):
+ def test_heaviside(self):
+ x = np.array([[-30.0, -0.1, 0.0, 0.2], [7.5, np.nan, np.inf, -np.inf]])
+ expectedhalf = np.array([[0.0, 0.0, 0.5, 1.0], [1.0, np.nan, 1.0, 0.0]])
+ expected1 = expectedhalf.copy()
+ expected1[0, 2] = 1
+
+ h = ncu.heaviside(x, 0.5)
+ assert_equal(h, expectedhalf)
+
+ h = ncu.heaviside(x, 1.0)
+ assert_equal(h, expected1)
+
+ x = x.astype(np.float32)
+
+ h = ncu.heaviside(x, np.float32(0.5))
+ assert_equal(h, expectedhalf.astype(np.float32))
+
+ h = ncu.heaviside(x, np.float32(1.0))
+ assert_equal(h, expected1.astype(np.float32))
+
+
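+# Editor's sketch (ours) of the function tested above: heaviside(x, h0) is
+# 0 for x < 0 and 1 for x > 0, with the second argument supplying the
+# value at exactly x == 0.
+def _sketch_heaviside():
+    import numpy as np
+    x = np.array([-2.0, 0.0, 3.0])
+    assert (np.heaviside(x, 0.5) == [0.0, 0.5, 1.0]).all()
+    assert (np.heaviside(x, 1.0) == [0.0, 1.0, 1.0]).all()
+
+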
+class TestSign(object):
+ def test_sign(self):
+ a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0])
+ out = np.zeros(a.shape)
+ tgt = np.array([1., -1., np.nan, 0.0, 1.0, -1.0])
+
+ with np.errstate(invalid='ignore'):
+ res = ncu.sign(a)
+ assert_equal(res, tgt)
+ res = ncu.sign(a, out)
+ assert_equal(res, tgt)
+ assert_equal(out, tgt)
+
+ def test_sign_dtype_object(self):
+ # In reference to github issue #6229
+
+ foo = np.array([-.1, 0, .1])
+ a = np.sign(foo.astype(object))
+ b = np.sign(foo)
+
+ assert_array_equal(a, b)
+
+ def test_sign_dtype_nan_object(self):
+ # In reference to github issue #6229
+ def test_nan():
+ foo = np.array([np.nan])
+ # FIXME: a not used
+ a = np.sign(foo.astype(object))
+
+ assert_raises(TypeError, test_nan)
+
+class TestMinMax(object):
+ def test_minmax_blocked(self):
+ # simd tests on max/min, test all alignments, slow but important
+ # for 2 * vz + 2 * (vs - 1) + 1 (unrolled once)
+ for dt, sz in [(np.float32, 15), (np.float64, 7)]:
+ for out, inp, msg in _gen_alignment_data(dtype=dt, type='unary',
+ max_size=sz):
+ for i in range(inp.size):
+ inp[:] = np.arange(inp.size, dtype=dt)
+ inp[i] = np.nan
+ emsg = lambda: '%r\n%s' % (inp, msg)
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning,
+ "invalid value encountered in reduce")
+ assert_(np.isnan(inp.max()), msg=emsg)
+ assert_(np.isnan(inp.min()), msg=emsg)
+
+ inp[i] = 1e10
+ assert_equal(inp.max(), 1e10, err_msg=msg)
+ inp[i] = -1e10
+ assert_equal(inp.min(), -1e10, err_msg=msg)
+
+ def test_lower_align(self):
+ # check data that is not aligned to element size
+        # i.e. doubles are aligned to 4 bytes on i386
+ d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
+ assert_equal(d.max(), d[0])
+ assert_equal(d.min(), d[0])
+
+ def test_reduce_reorder(self):
+        # gh-10370, gh-11029: some compilers reorder the call to
+        # npy_getfloatstatus and put it before the call to an intrinsic
+        # function that causes invalid status to be set. Also make sure
+        # warnings are not emitted.
+ for n in (2, 4, 8, 16, 32):
+ for dt in (np.float32, np.float16, np.complex64):
+ for r in np.diagflat(np.array([np.nan] * n, dtype=dt)):
+ assert_equal(np.min(r), np.nan)
+
+ def test_minimize_no_warns(self):
+ a = np.minimum(np.nan, 1)
+ assert_equal(a, np.nan)
+
+
+class TestAbsoluteNegative(object):
+ def test_abs_neg_blocked(self):
+ # simd tests on abs, test all alignments for vz + 2 * (vs - 1) + 1
+ for dt, sz in [(np.float32, 11), (np.float64, 5)]:
+ for out, inp, msg in _gen_alignment_data(dtype=dt, type='unary',
+ max_size=sz):
+ tgt = [ncu.absolute(i) for i in inp]
+ np.absolute(inp, out=out)
+ assert_equal(out, tgt, err_msg=msg)
+ assert_((out >= 0).all())
+
+ tgt = [-1*(i) for i in inp]
+ np.negative(inp, out=out)
+ assert_equal(out, tgt, err_msg=msg)
+
+ for v in [np.nan, -np.inf, np.inf]:
+ for i in range(inp.size):
+ d = np.arange(inp.size, dtype=dt)
+ inp[:] = -d
+ inp[i] = v
+ d[i] = -v if v == -np.inf else v
+ assert_array_equal(np.abs(inp), d, err_msg=msg)
+ np.abs(inp, out=out)
+ assert_array_equal(out, d, err_msg=msg)
+
+ assert_array_equal(-inp, -1*inp, err_msg=msg)
+ d = -1 * inp
+ np.negative(inp, out=out)
+ assert_array_equal(out, d, err_msg=msg)
+
+ def test_lower_align(self):
+ # check data that is not aligned to element size
+        # i.e. doubles are aligned to 4 bytes on i386
+ d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
+ assert_equal(np.abs(d), d)
+ assert_equal(np.negative(d), -d)
+ np.negative(d, out=d)
+ np.negative(np.ones_like(d), out=d)
+ np.abs(d, out=d)
+ np.abs(np.ones_like(d), out=d)
+
+
+class TestPositive(object):
+ def test_valid(self):
+ valid_dtypes = [int, float, complex, object]
+ for dtype in valid_dtypes:
+ x = np.arange(5, dtype=dtype)
+ result = np.positive(x)
+ assert_equal(x, result, err_msg=str(dtype))
+
+ def test_invalid(self):
+ with assert_raises(TypeError):
+ np.positive(True)
+ with assert_raises(TypeError):
+ np.positive(np.datetime64('2000-01-01'))
+ with assert_raises(TypeError):
+ np.positive(np.array(['foo'], dtype=str))
+ with assert_raises(TypeError):
+ np.positive(np.array(['bar'], dtype=object))
+
+
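+# Editor's sketch (ours) of the __array_ufunc__ hook that the protocol
+# tests below exercise exhaustively: any operand defining it intercepts
+# every ufunc call it participates in, and its return value is used as-is.
+def _sketch_array_ufunc_override():
+    import numpy as np
+
+    class LoggingOperand(object):
+        # record the call instead of computing anything
+        def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+            return (ufunc.__name__, method, len(inputs))
+
+    a = LoggingOperand()
+    assert np.add(a, 1) == ('add', '__call__', 2)
+    assert np.negative(a) == ('negative', '__call__', 1)
+
+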
+class TestSpecialMethods(object):
+ def test_wrap(self):
+
+ class with_wrap(object):
+ def __array__(self):
+ return np.zeros(1)
+
+ def __array_wrap__(self, arr, context):
+ r = with_wrap()
+ r.arr = arr
+ r.context = context
+ return r
+
+ a = with_wrap()
+ x = ncu.minimum(a, a)
+ assert_equal(x.arr, np.zeros(1))
+ func, args, i = x.context
+ assert_(func is ncu.minimum)
+ assert_equal(len(args), 2)
+ assert_equal(args[0], a)
+ assert_equal(args[1], a)
+ assert_equal(i, 0)
+
+ def test_wrap_and_prepare_out(self):
+ # Calling convention for out should not affect how special methods are
+ # called
+
+ class StoreArrayPrepareWrap(np.ndarray):
+ _wrap_args = None
+ _prepare_args = None
+ def __new__(cls):
+ return np.empty(()).view(cls)
+ def __array_wrap__(self, obj, context):
+ self._wrap_args = context[1]
+ return obj
+ def __array_prepare__(self, obj, context):
+ self._prepare_args = context[1]
+ return obj
+ @property
+ def args(self):
+ # We need to ensure these are fetched at the same time, before
+                # any other ufuncs are called by the assertions
+ return (self._prepare_args, self._wrap_args)
+ def __repr__(self):
+ return "a" # for short test output
+
+ def do_test(f_call, f_expected):
+ a = StoreArrayPrepareWrap()
+ f_call(a)
+ p, w = a.args
+ expected = f_expected(a)
+ try:
+ assert_equal(p, expected)
+ assert_equal(w, expected)
+ except AssertionError as e:
+ # assert_equal produces truly useless error messages
+ raise AssertionError("\n".join([
+ "Bad arguments passed in ufunc call",
+ " expected: {}".format(expected),
+ " __array_prepare__ got: {}".format(p),
+ " __array_wrap__ got: {}".format(w)
+ ]))
+
+ # method not on the out argument
+ do_test(lambda a: np.add(a, 0), lambda a: (a, 0))
+ do_test(lambda a: np.add(a, 0, None), lambda a: (a, 0))
+ do_test(lambda a: np.add(a, 0, out=None), lambda a: (a, 0))
+ do_test(lambda a: np.add(a, 0, out=(None,)), lambda a: (a, 0))
+
+ # method on the out argument
+ do_test(lambda a: np.add(0, 0, a), lambda a: (0, 0, a))
+ do_test(lambda a: np.add(0, 0, out=a), lambda a: (0, 0, a))
+ do_test(lambda a: np.add(0, 0, out=(a,)), lambda a: (0, 0, a))
+
+ def test_wrap_with_iterable(self):
+ # test fix for bug #1026:
+
+ class with_wrap(np.ndarray):
+ __array_priority__ = 10
+
+ def __new__(cls):
+ return np.asarray(1).view(cls).copy()
+
+ def __array_wrap__(self, arr, context):
+ return arr.view(type(self))
+
+ a = with_wrap()
+ x = ncu.multiply(a, (1, 2, 3))
+ assert_(isinstance(x, with_wrap))
+ assert_array_equal(x, np.array((1, 2, 3)))
+
+ def test_priority_with_scalar(self):
+ # test fix for bug #826:
+
+ class A(np.ndarray):
+ __array_priority__ = 10
+
+ def __new__(cls):
+ return np.asarray(1.0, 'float64').view(cls).copy()
+
+ a = A()
+ x = np.float64(1)*a
+ assert_(isinstance(x, A))
+ assert_array_equal(x, np.array(1))
+
+ def test_old_wrap(self):
+
+ class with_wrap(object):
+ def __array__(self):
+ return np.zeros(1)
+
+ def __array_wrap__(self, arr):
+ r = with_wrap()
+ r.arr = arr
+ return r
+
+ a = with_wrap()
+ x = ncu.minimum(a, a)
+ assert_equal(x.arr, np.zeros(1))
+
+ def test_priority(self):
+
+ class A(object):
+ def __array__(self):
+ return np.zeros(1)
+
+ def __array_wrap__(self, arr, context):
+ r = type(self)()
+ r.arr = arr
+ r.context = context
+ return r
+
+ class B(A):
+ __array_priority__ = 20.
+
+ class C(A):
+ __array_priority__ = 40.
+
+ x = np.zeros(1)
+ a = A()
+ b = B()
+ c = C()
+ f = ncu.minimum
+ assert_(type(f(x, x)) is np.ndarray)
+ assert_(type(f(x, a)) is A)
+ assert_(type(f(x, b)) is B)
+ assert_(type(f(x, c)) is C)
+ assert_(type(f(a, x)) is A)
+ assert_(type(f(b, x)) is B)
+ assert_(type(f(c, x)) is C)
+
+ assert_(type(f(a, a)) is A)
+ assert_(type(f(a, b)) is B)
+ assert_(type(f(b, a)) is B)
+ assert_(type(f(b, b)) is B)
+ assert_(type(f(b, c)) is C)
+ assert_(type(f(c, b)) is C)
+ assert_(type(f(c, c)) is C)
+
+        assert_(type(ncu.exp(a)) is A)
+        assert_(type(ncu.exp(b)) is B)
+        assert_(type(ncu.exp(c)) is C)
+
+ def test_failing_wrap(self):
+
+ class A(object):
+ def __array__(self):
+ return np.zeros(2)
+
+ def __array_wrap__(self, arr, context):
+ raise RuntimeError
+
+ a = A()
+ assert_raises(RuntimeError, ncu.maximum, a, a)
+ assert_raises(RuntimeError, ncu.maximum.reduce, a)
+
+ def test_failing_out_wrap(self):
+
+ singleton = np.array([1.0])
+
+ class Ok(np.ndarray):
+ def __array_wrap__(self, obj):
+ return singleton
+
+ class Bad(np.ndarray):
+ def __array_wrap__(self, obj):
+ raise RuntimeError
+
+ ok = np.empty(1).view(Ok)
+ bad = np.empty(1).view(Bad)
+
+ # double-free (segfault) of "ok" if "bad" raises an exception
+ for i in range(10):
+ assert_raises(RuntimeError, ncu.frexp, 1, ok, bad)
+
+ def test_none_wrap(self):
+ # Tests that issue #8507 is resolved. Previously, this would segfault
+
+ class A(object):
+ def __array__(self):
+ return np.zeros(1)
+
+ def __array_wrap__(self, arr, context=None):
+ return None
+
+ a = A()
+ assert_equal(ncu.maximum(a, a), None)
+
+ def test_default_prepare(self):
+
+ class with_wrap(object):
+ __array_priority__ = 10
+
+ def __array__(self):
+ return np.zeros(1)
+
+ def __array_wrap__(self, arr, context):
+ return arr
+
+ a = with_wrap()
+ x = ncu.minimum(a, a)
+ assert_equal(x, np.zeros(1))
+ assert_equal(type(x), np.ndarray)
+
+ def test_prepare(self):
+
+ class with_prepare(np.ndarray):
+ __array_priority__ = 10
+
+ def __array_prepare__(self, arr, context):
+                # make sure we can return a new array from __array_prepare__
+ return np.array(arr).view(type=with_prepare)
+
+ a = np.array(1).view(type=with_prepare)
+ x = np.add(a, a)
+ assert_equal(x, np.array(2))
+ assert_equal(type(x), with_prepare)
+
+ def test_prepare_out(self):
+
+ class with_prepare(np.ndarray):
+ __array_priority__ = 10
+
+ def __array_prepare__(self, arr, context):
+ return np.array(arr).view(type=with_prepare)
+
+ a = np.array([1]).view(type=with_prepare)
+ x = np.add(a, a, a)
+ # Returned array is new, because of the strange
+ # __array_prepare__ above
+ assert_(not np.shares_memory(x, a))
+ assert_equal(x, np.array([2]))
+ assert_equal(type(x), with_prepare)
+
+ def test_failing_prepare(self):
+
+ class A(object):
+ def __array__(self):
+ return np.zeros(1)
+
+ def __array_prepare__(self, arr, context=None):
+ raise RuntimeError
+
+ a = A()
+ assert_raises(RuntimeError, ncu.maximum, a, a)
+
+ def test_array_with_context(self):
+
+ class A(object):
+ def __array__(self, dtype=None, context=None):
+ func, args, i = context
+ self.func = func
+ self.args = args
+ self.i = i
+ return np.zeros(1)
+
+ class B(object):
+ def __array__(self, dtype=None):
+ return np.zeros(1, dtype)
+
+ class C(object):
+ def __array__(self):
+ return np.zeros(1)
+
+ a = A()
+ ncu.maximum(np.zeros(1), a)
+ assert_(a.func is ncu.maximum)
+ assert_equal(a.args[0], 0)
+ assert_(a.args[1] is a)
+ assert_(a.i == 1)
+ assert_equal(ncu.maximum(a, B()), 0)
+ assert_equal(ncu.maximum(a, C()), 0)
+
+ def test_ufunc_override(self):
+ # check override works even with instance with high priority.
+ class A(object):
+ def __array_ufunc__(self, func, method, *inputs, **kwargs):
+ return self, func, method, inputs, kwargs
+
+ class MyNDArray(np.ndarray):
+ __array_priority__ = 100
+
+ a = A()
+ b = np.array([1]).view(MyNDArray)
+ res0 = np.multiply(a, b)
+ res1 = np.multiply(b, b, out=a)
+
+ # self
+ assert_equal(res0[0], a)
+ assert_equal(res1[0], a)
+ assert_equal(res0[1], np.multiply)
+ assert_equal(res1[1], np.multiply)
+ assert_equal(res0[2], '__call__')
+ assert_equal(res1[2], '__call__')
+ assert_equal(res0[3], (a, b))
+ assert_equal(res1[3], (b, b))
+ assert_equal(res0[4], {})
+ assert_equal(res1[4], {'out': (a,)})
+
+ def test_ufunc_override_mro(self):
+
+ # Some multi arg functions for testing.
+ def tres_mul(a, b, c):
+ return a * b * c
+
+ def quatro_mul(a, b, c, d):
+ return a * b * c * d
+
+ # Make these into ufuncs.
+ three_mul_ufunc = np.frompyfunc(tres_mul, 3, 1)
+ four_mul_ufunc = np.frompyfunc(quatro_mul, 4, 1)
+
+ class A(object):
+ def __array_ufunc__(self, func, method, *inputs, **kwargs):
+ return "A"
+
+ class ASub(A):
+ def __array_ufunc__(self, func, method, *inputs, **kwargs):
+ return "ASub"
+
+ class B(object):
+ def __array_ufunc__(self, func, method, *inputs, **kwargs):
+ return "B"
+
+ class C(object):
+ def __init__(self):
+ self.count = 0
+
+ def __array_ufunc__(self, func, method, *inputs, **kwargs):
+ self.count += 1
+ return NotImplemented
+
+ class CSub(C):
+ def __array_ufunc__(self, func, method, *inputs, **kwargs):
+ self.count += 1
+ return NotImplemented
+
+ a = A()
+ a_sub = ASub()
+ b = B()
+ c = C()
+
+ # Standard
+ res = np.multiply(a, a_sub)
+ assert_equal(res, "ASub")
+ res = np.multiply(a_sub, b)
+ assert_equal(res, "ASub")
+
+ # With 1 NotImplemented
+ res = np.multiply(c, a)
+ assert_equal(res, "A")
+ assert_equal(c.count, 1)
+ # Check our counter works, so we can trust tests below.
+ res = np.multiply(c, a)
+ assert_equal(c.count, 2)
+
+ # Both NotImplemented.
+ c = C()
+ c_sub = CSub()
+ assert_raises(TypeError, np.multiply, c, c_sub)
+ assert_equal(c.count, 1)
+ assert_equal(c_sub.count, 1)
+ c.count = c_sub.count = 0
+ assert_raises(TypeError, np.multiply, c_sub, c)
+ assert_equal(c.count, 1)
+ assert_equal(c_sub.count, 1)
+ c.count = 0
+ assert_raises(TypeError, np.multiply, c, c)
+ assert_equal(c.count, 1)
+ c.count = 0
+ assert_raises(TypeError, np.multiply, 2, c)
+ assert_equal(c.count, 1)
+
+ # Ternary testing.
+ assert_equal(three_mul_ufunc(a, 1, 2), "A")
+ assert_equal(three_mul_ufunc(1, a, 2), "A")
+ assert_equal(three_mul_ufunc(1, 2, a), "A")
+
+ assert_equal(three_mul_ufunc(a, a, 6), "A")
+ assert_equal(three_mul_ufunc(a, 2, a), "A")
+ assert_equal(three_mul_ufunc(a, 2, b), "A")
+ assert_equal(three_mul_ufunc(a, 2, a_sub), "ASub")
+ assert_equal(three_mul_ufunc(a, a_sub, 3), "ASub")
+ c.count = 0
+ assert_equal(three_mul_ufunc(c, a_sub, 3), "ASub")
+ assert_equal(c.count, 1)
+ c.count = 0
+ assert_equal(three_mul_ufunc(1, a_sub, c), "ASub")
+ assert_equal(c.count, 0)
+
+ c.count = 0
+ assert_equal(three_mul_ufunc(a, b, c), "A")
+ assert_equal(c.count, 0)
+ c_sub.count = 0
+ assert_equal(three_mul_ufunc(a, b, c_sub), "A")
+ assert_equal(c_sub.count, 0)
+ assert_equal(three_mul_ufunc(1, 2, b), "B")
+
+ assert_raises(TypeError, three_mul_ufunc, 1, 2, c)
+ assert_raises(TypeError, three_mul_ufunc, c_sub, 2, c)
+ assert_raises(TypeError, three_mul_ufunc, c_sub, 2, 3)
+
+ # Quaternary testing.
+ assert_equal(four_mul_ufunc(a, 1, 2, 3), "A")
+ assert_equal(four_mul_ufunc(1, a, 2, 3), "A")
+ assert_equal(four_mul_ufunc(1, 1, a, 3), "A")
+ assert_equal(four_mul_ufunc(1, 1, 2, a), "A")
+
+ assert_equal(four_mul_ufunc(a, b, 2, 3), "A")
+ assert_equal(four_mul_ufunc(1, a, 2, b), "A")
+ assert_equal(four_mul_ufunc(b, 1, a, 3), "B")
+ assert_equal(four_mul_ufunc(a_sub, 1, 2, a), "ASub")
+ assert_equal(four_mul_ufunc(a, 1, 2, a_sub), "ASub")
+
+ c = C()
+ c_sub = CSub()
+ assert_raises(TypeError, four_mul_ufunc, 1, 2, 3, c)
+ assert_equal(c.count, 1)
+ c.count = 0
+ assert_raises(TypeError, four_mul_ufunc, 1, 2, c_sub, c)
+ assert_equal(c_sub.count, 1)
+ assert_equal(c.count, 1)
+ c2 = C()
+ c.count = c_sub.count = 0
+ assert_raises(TypeError, four_mul_ufunc, 1, c, c_sub, c2)
+ assert_equal(c_sub.count, 1)
+ assert_equal(c.count, 1)
+ assert_equal(c2.count, 0)
+ c.count = c2.count = c_sub.count = 0
+ assert_raises(TypeError, four_mul_ufunc, c2, c, c_sub, c)
+ assert_equal(c_sub.count, 1)
+ assert_equal(c.count, 0)
+ assert_equal(c2.count, 1)
+
+ def test_ufunc_override_methods(self):
+
+ class A(object):
+ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+ return self, ufunc, method, inputs, kwargs
+
+ # __call__
+ a = A()
+ res = np.multiply.__call__(1, a, foo='bar', answer=42)
+ assert_equal(res[0], a)
+ assert_equal(res[1], np.multiply)
+ assert_equal(res[2], '__call__')
+ assert_equal(res[3], (1, a))
+ assert_equal(res[4], {'foo': 'bar', 'answer': 42})
+
+ # __call__, wrong args
+ assert_raises(TypeError, np.multiply, a)
+ assert_raises(TypeError, np.multiply, a, a, a, a)
+ assert_raises(TypeError, np.multiply, a, a, sig='a', signature='a')
+ assert_raises(TypeError, ncu_tests.inner1d, a, a, axis=0, axes=[0, 0])
+
+ # reduce, positional args
+ res = np.multiply.reduce(a, 'axis0', 'dtype0', 'out0', 'keep0')
+ assert_equal(res[0], a)
+ assert_equal(res[1], np.multiply)
+ assert_equal(res[2], 'reduce')
+ assert_equal(res[3], (a,))
+ assert_equal(res[4], {'dtype':'dtype0',
+ 'out': ('out0',),
+ 'keepdims': 'keep0',
+ 'axis': 'axis0'})
+
+ # reduce, kwargs
+ res = np.multiply.reduce(a, axis='axis0', dtype='dtype0', out='out0',
+ keepdims='keep0', initial='init0')
+ assert_equal(res[0], a)
+ assert_equal(res[1], np.multiply)
+ assert_equal(res[2], 'reduce')
+ assert_equal(res[3], (a,))
+ assert_equal(res[4], {'dtype':'dtype0',
+ 'out': ('out0',),
+ 'keepdims': 'keep0',
+ 'axis': 'axis0',
+ 'initial': 'init0'})
+
+ # reduce, output equal to None removed, but not other explicit ones,
+ # even if they are at their default value.
+ res = np.multiply.reduce(a, 0, None, None, False)
+ assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False})
+ res = np.multiply.reduce(a, out=None, axis=0, keepdims=True)
+ assert_equal(res[4], {'axis': 0, 'keepdims': True})
+ res = np.multiply.reduce(a, None, out=(None,), dtype=None)
+ assert_equal(res[4], {'axis': None, 'dtype': None})
+ res = np.multiply.reduce(a, 0, None, None, False, 2)
+ assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False, 'initial': 2})
+ # np._NoValue ignored for initial.
+ res = np.multiply.reduce(a, 0, None, None, False, np._NoValue)
+ assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False})
+ # None kept for initial.
+ res = np.multiply.reduce(a, 0, None, None, False, None)
+ assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False, 'initial': None})
+
+ # reduce, wrong args
+ assert_raises(ValueError, np.multiply.reduce, a, out=())
+ assert_raises(ValueError, np.multiply.reduce, a, out=('out0', 'out1'))
+ assert_raises(TypeError, np.multiply.reduce, a, 'axis0', axis='axis0')
+
+ # accumulate, pos args
+ res = np.multiply.accumulate(a, 'axis0', 'dtype0', 'out0')
+ assert_equal(res[0], a)
+ assert_equal(res[1], np.multiply)
+ assert_equal(res[2], 'accumulate')
+ assert_equal(res[3], (a,))
+ assert_equal(res[4], {'dtype':'dtype0',
+ 'out': ('out0',),
+ 'axis': 'axis0'})
+
+ # accumulate, kwargs
+ res = np.multiply.accumulate(a, axis='axis0', dtype='dtype0',
+ out='out0')
+ assert_equal(res[0], a)
+ assert_equal(res[1], np.multiply)
+ assert_equal(res[2], 'accumulate')
+ assert_equal(res[3], (a,))
+ assert_equal(res[4], {'dtype':'dtype0',
+ 'out': ('out0',),
+ 'axis': 'axis0'})
+
+ # accumulate, output equal to None removed.
+ res = np.multiply.accumulate(a, 0, None, None)
+ assert_equal(res[4], {'axis': 0, 'dtype': None})
+ res = np.multiply.accumulate(a, out=None, axis=0, dtype='dtype1')
+ assert_equal(res[4], {'axis': 0, 'dtype': 'dtype1'})
+ res = np.multiply.accumulate(a, None, out=(None,), dtype=None)
+ assert_equal(res[4], {'axis': None, 'dtype': None})
+
+ # accumulate, wrong args
+ assert_raises(ValueError, np.multiply.accumulate, a, out=())
+ assert_raises(ValueError, np.multiply.accumulate, a,
+ out=('out0', 'out1'))
+ assert_raises(TypeError, np.multiply.accumulate, a,
+ 'axis0', axis='axis0')
+
+ # reduceat, pos args
+ res = np.multiply.reduceat(a, [4, 2], 'axis0', 'dtype0', 'out0')
+ assert_equal(res[0], a)
+ assert_equal(res[1], np.multiply)
+ assert_equal(res[2], 'reduceat')
+ assert_equal(res[3], (a, [4, 2]))
+ assert_equal(res[4], {'dtype':'dtype0',
+ 'out': ('out0',),
+ 'axis': 'axis0'})
+
+ # reduceat, kwargs
+ res = np.multiply.reduceat(a, [4, 2], axis='axis0', dtype='dtype0',
+ out='out0')
+ assert_equal(res[0], a)
+ assert_equal(res[1], np.multiply)
+ assert_equal(res[2], 'reduceat')
+ assert_equal(res[3], (a, [4, 2]))
+ assert_equal(res[4], {'dtype':'dtype0',
+ 'out': ('out0',),
+ 'axis': 'axis0'})
+
+ # reduceat, output equal to None removed.
+ res = np.multiply.reduceat(a, [4, 2], 0, None, None)
+ assert_equal(res[4], {'axis': 0, 'dtype': None})
+ res = np.multiply.reduceat(a, [4, 2], axis=None, out=None, dtype='dt')
+ assert_equal(res[4], {'axis': None, 'dtype': 'dt'})
+ res = np.multiply.reduceat(a, [4, 2], None, None, out=(None,))
+ assert_equal(res[4], {'axis': None, 'dtype': None})
+
+ # reduceat, wrong args
+ assert_raises(ValueError, np.multiply.reduce, a, [4, 2], out=())
+ assert_raises(ValueError, np.multiply.reduce, a, [4, 2],
+ out=('out0', 'out1'))
+ assert_raises(TypeError, np.multiply.reduce, a, [4, 2],
+ 'axis0', axis='axis0')
+
+ # outer
+ res = np.multiply.outer(a, 42)
+ assert_equal(res[0], a)
+ assert_equal(res[1], np.multiply)
+ assert_equal(res[2], 'outer')
+ assert_equal(res[3], (a, 42))
+ assert_equal(res[4], {})
+
+ # outer, wrong args
+ assert_raises(TypeError, np.multiply.outer, a)
+ assert_raises(TypeError, np.multiply.outer, a, a, a, a)
+ assert_raises(TypeError, np.multiply.outer, a, a, sig='a', signature='a')
+
+ # at
+ res = np.multiply.at(a, [4, 2], 'b0')
+ assert_equal(res[0], a)
+ assert_equal(res[1], np.multiply)
+ assert_equal(res[2], 'at')
+ assert_equal(res[3], (a, [4, 2], 'b0'))
+
+ # at, wrong args
+ assert_raises(TypeError, np.multiply.at, a)
+ assert_raises(TypeError, np.multiply.at, a, a, a, a)
+
+ def test_ufunc_override_out(self):
+
+ class A(object):
+ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+ return kwargs
+
+ class B(object):
+ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+ return kwargs
+
+ a = A()
+ b = B()
+ res0 = np.multiply(a, b, 'out_arg')
+ res1 = np.multiply(a, b, out='out_arg')
+ res2 = np.multiply(2, b, 'out_arg')
+ res3 = np.multiply(3, b, out='out_arg')
+ res4 = np.multiply(a, 4, 'out_arg')
+ res5 = np.multiply(a, 5, out='out_arg')
+
+ assert_equal(res0['out'][0], 'out_arg')
+ assert_equal(res1['out'][0], 'out_arg')
+ assert_equal(res2['out'][0], 'out_arg')
+ assert_equal(res3['out'][0], 'out_arg')
+ assert_equal(res4['out'][0], 'out_arg')
+ assert_equal(res5['out'][0], 'out_arg')
+
+ # ufuncs with multiple output modf and frexp.
+ res6 = np.modf(a, 'out0', 'out1')
+ res7 = np.frexp(a, 'out0', 'out1')
+ assert_equal(res6['out'][0], 'out0')
+ assert_equal(res6['out'][1], 'out1')
+ assert_equal(res7['out'][0], 'out0')
+ assert_equal(res7['out'][1], 'out1')
+
+ # While we're at it, check that default output is never passed on.
+ assert_(np.sin(a, None) == {})
+ assert_(np.sin(a, out=None) == {})
+ assert_(np.sin(a, out=(None,)) == {})
+ assert_(np.modf(a, None) == {})
+ assert_(np.modf(a, None, None) == {})
+ assert_(np.modf(a, out=(None, None)) == {})
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', DeprecationWarning)
+ assert_(np.modf(a, out=None) == {})
+ assert_(w[0].category is DeprecationWarning)
+
+ # don't give positional and output argument, or too many arguments.
+ # wrong number of arguments in the tuple is an error too.
+ assert_raises(TypeError, np.multiply, a, b, 'one', out='two')
+ assert_raises(TypeError, np.multiply, a, b, 'one', 'two')
+ assert_raises(ValueError, np.multiply, a, b, out=('one', 'two'))
+ assert_raises(ValueError, np.multiply, a, out=())
+ assert_raises(TypeError, np.modf, a, 'one', out=('two', 'three'))
+ assert_raises(TypeError, np.modf, a, 'one', 'two', 'three')
+ assert_raises(ValueError, np.modf, a, out=('one', 'two', 'three'))
+ assert_raises(ValueError, np.modf, a, out=('one',))
+
+ def test_ufunc_override_exception(self):
+
+ class A(object):
+ def __array_ufunc__(self, *a, **kwargs):
+ raise ValueError("oops")
+
+ a = A()
+ assert_raises(ValueError, np.negative, 1, out=a)
+ assert_raises(ValueError, np.negative, a)
+ assert_raises(ValueError, np.divide, 1., a)
+
+ def test_ufunc_override_not_implemented(self):
+
+ class A(object):
+ def __array_ufunc__(self, *args, **kwargs):
+ return NotImplemented
+
+ msg = ("operand type(s) all returned NotImplemented from "
+ "__array_ufunc__(, '__call__', <*>): 'A'")
+ with assert_raises_regex(TypeError, fnmatch.translate(msg)):
+ np.negative(A())
+
+ msg = ("operand type(s) all returned NotImplemented from "
+ "__array_ufunc__(, '__call__', <*>,